Edit on GitHub

sqlmesh.core.test

  1import difflib
  2import fnmatch
  3import itertools
  4import pathlib
  5import types
  6import typing as t
  7import unittest
  8
  9import pandas as pd
 10import ruamel
 11from sqlglot import Expression, exp, parse_one
 12
 13from sqlmesh.core.engine_adapter import EngineAdapter
 14from sqlmesh.core.snapshot import Snapshot
 15from sqlmesh.utils import unique
 16from sqlmesh.utils.errors import SQLMeshError
 17from sqlmesh.utils.pydantic import PydanticModel
 18from sqlmesh.utils.yaml import load as yaml_load
 19
 20
 21class ModelTestMetadata(PydanticModel):
 22    path: pathlib.Path
 23    test_name: str
 24    body: ruamel.yaml.comments.CommentedMap
 25
 26    @property
 27    def fully_qualified_test_name(self) -> str:
 28        return f"{self.path}::{self.test_name}"
 29
 30    def __hash__(self) -> int:
 31        return self.fully_qualified_test_name.__hash__()
 32
 33
class TestError(SQLMeshError):
    """Raised when a model test definition is invalid or cannot be run."""
 36
 37
 38class ModelTest(unittest.TestCase):
 39    view_names: t.List[str] = []
 40
 41    def __init__(
 42        self,
 43        body: t.Dict[str, t.Any],
 44        test_name: str,
 45        snapshots: t.Dict[str, Snapshot],
 46        engine_adapter: EngineAdapter,
 47        path: t.Optional[pathlib.Path],
 48    ) -> None:
 49        """ModelTest encapsulates a unit test for a model.
 50
 51        Args:
 52            body: A dictionary that contains test metadata like inputs and outputs.
 53            test_name: The name of the test.
 54            snapshots: All snapshots to use for expansion and mapping of physical locations.
 55            engine_adapter: The engine adapter to use.
 56            path: An optional path to the test definition yaml file
 57        """
 58        self.body = body
 59        self.path = path
 60
 61        self.test_name = test_name
 62        self.engine_adapter = engine_adapter
 63
 64        if "model" not in body:
 65            self._raise_error("Incomplete test, missing model name")
 66
 67        if "outputs" not in body:
 68            self._raise_error("Incomplete test, missing outputs")
 69
 70        self.model_name = body["model"]
 71        if self.model_name not in snapshots:
 72            self._raise_error(f"Model '{self.model_name}' was not found")
 73
 74        self.snapshot = snapshots[self.model_name]
 75
 76        inputs = self.body.get("inputs", {})
 77        for snapshot_id in self.snapshot.parents:
 78            if snapshot_id.name not in inputs:
 79                self._raise_error(f"Incomplete test, missing input for table {snapshot_id.name}")
 80
 81        self.query = self.snapshot.model.render_query(**self.body.get("vars", {}))
 82        # For tests we just use the model name for the table reference and we don't want to expand
 83        mapping = {name: _test_fixture_name(name) for name in snapshots}
 84        if mapping:
 85            self.query = exp.replace_tables(self.query, mapping)
 86
 87        self.ctes = {cte.alias: cte for cte in self.query.ctes}
 88
 89        super().__init__()
 90
 91    def setUp(self) -> None:
 92        """Load all input tables"""
 93        inputs = {name: table["rows"] for name, table in self.body.get("inputs", {}).items()}
 94        self.engine_adapter.create_schema(self.snapshot.physical_schema)
 95        for table, rows in inputs.items():
 96            df = pd.DataFrame.from_records(rows)  # noqa
 97            columns_to_types: t.Dict[str, exp.DataType] = {}
 98            for i, v in rows[0].items():
 99                # convert ruamel into python
100                v = v.real if hasattr(v, "real") else v
101                columns_to_types[i] = parse_one(type(v).__name__, into=exp.DataType)  # type: ignore
102            self.engine_adapter.create_schema(table)
103            self.engine_adapter.create_view(_test_fixture_name(table), df, columns_to_types)
104
105    def tearDown(self) -> None:
106        """Drop all input tables"""
107        for table in self.body.get("inputs", {}):
108            self.engine_adapter.drop_view(table)
109
110    def assert_equal(self, df1: pd.DataFrame, df2: pd.DataFrame) -> None:
111        """Compare two DataFrames"""
112        try:
113            pd.testing.assert_frame_equal(
114                df1, df2, check_dtype=False, check_datetimelike_compat=True
115            )
116        except AssertionError as e:
117            diff = "\n".join(
118                difflib.ndiff(
119                    [str(x) for x in df1.to_dict("records")],
120                    [str(x) for x in df2.to_dict("records")],
121                )
122            )
123            e.args = (f"Data differs\n{diff}",)
124            raise e
125
126    def execute(self, query: Expression) -> pd.DataFrame:
127        """Execute the query with the engine adapter and return a DataFrame"""
128        return self.engine_adapter.fetchdf(query)
129
130    def test_ctes(self) -> None:
131        """Run CTE queries and compare output to expected output"""
132        for cte_name, value in self.body["outputs"].get("ctes", {}).items():
133            with self.subTest(cte=cte_name):
134                if cte_name not in self.ctes:
135                    self._raise_error(f"No CTE named {cte_name} found in model {self.model_name}")
136                expected_df = pd.DataFrame.from_records(value["rows"])
137                actual_df = self.execute(self.ctes[cte_name].this)
138                self.assert_equal(expected_df, actual_df)
139
140    def runTest(self) -> None:
141        self.test_ctes()
142
143        # Test model query
144        if "rows" in self.body["outputs"].get("query", {}):
145            expected_df = pd.DataFrame.from_records(self.body["outputs"]["query"]["rows"])
146            actual_df = self.execute(self.query)
147            self.assert_equal(expected_df, actual_df)
148
149    def __str__(self) -> str:
150        return f"{self.test_name} ({self.path}:{self.body.lc.line})"  # type: ignore
151
152    def _raise_error(self, msg: str) -> None:
153        raise TestError(f"{msg} at {self.path}")
154
155
class ModelTextTestResult(unittest.TextTestResult):
    """A TextTestResult variant that reports failures without tracebacks."""

    def addFailure(
        self,
        test: unittest.TestCase,
        err: t.Union[
            t.Tuple[t.Type[BaseException], BaseException, types.TracebackType],
            t.Tuple[None, None, None],
        ],
    ) -> None:
        """Record a test failure, discarding its traceback.

        The traceback is dropped because it is redundant for YAML-defined
        model tests and adds no useful information to the report.

        Args:
            test: The failing test case.
            err: An (exception type, exception value, traceback) triple in
                the form returned by sys.exc_info().
        """
        exctype, value, _ = err
        super().addFailure(test, (exctype, value, None))  # type: ignore
175
176
def load_model_test_file(
    path: pathlib.Path,
) -> t.Dict[str, ModelTestMetadata]:
    """Load a single model test file.

    Args:
        path: The path to the test file.

    Returns:
        A dictionary mapping each test name in the file to its ModelTestMetadata.
    """
    contents = yaml_load(path)
    return {
        test_name: ModelTestMetadata(path=path, test_name=test_name, body=body)
        for test_name, body in contents.items()
    }
196
197
def discover_model_tests(
    path: pathlib.Path, ignore_patterns: t.Optional[t.List[str]] = None
) -> t.Generator[ModelTestMetadata, None, None]:
    """Discover model tests.

    Model tests are defined in YAML files and contain the inputs and outputs used to test model queries.

    Args:
        path: A path to search for tests.
        ignore_patterns: An optional list of patterns to ignore.

    Returns:
        A generator over the ModelTestMetadata found under the path.
    """
    base = pathlib.Path(path)
    # Test files are any test*.yaml / test*.yml anywhere under the search path.
    candidates = itertools.chain(
        base.glob("**/test*.yaml"),
        base.glob("**/test*.yml"),
    )
    for yaml_file in candidates:
        if any(yaml_file.match(pattern) for pattern in ignore_patterns or []):
            continue
        yield from load_model_test_file(yaml_file).values()
224
225
def filter_tests_by_patterns(
    tests: t.List[ModelTestMetadata], patterns: t.List[str]
) -> t.List[ModelTestMetadata]:
    """Keep only tests whose fully qualified name matches at least one pattern.

    Args:
        tests: The ModelTestMetadata objects to filter.
        patterns: Patterns to match against; a pattern containing "*" is
            matched with fnmatch semantics, otherwise as a substring.

    Returns:
        The matching tests, de-duplicated.
    """

    def _matches(name: str, pattern: str) -> bool:
        return ("*" in pattern and fnmatch.fnmatchcase(name, pattern)) or pattern in name

    return unique(
        test
        for test in tests
        for pattern in patterns
        if _matches(test.fully_qualified_test_name, pattern)
    )
244
245
def run_tests(
    model_test_metadata: t.List[ModelTestMetadata],
    snapshots: t.Dict[str, Snapshot],
    engine_adapter: EngineAdapter,
    verbosity: int = 1,
) -> unittest.result.TestResult:
    """Build a unittest suite of ModelTest cases and run it.

    Args:
        model_test_metadata: A list of ModelTestMetadata objects to run.
        snapshots: All snapshots to use for expansion and mapping of physical locations.
        engine_adapter: The engine adapter to use.
        verbosity: The verbosity level.

    Returns:
        The unittest result of running the suite.
    """
    cases = (
        ModelTest(
            body=metadata.body,
            test_name=metadata.test_name,
            snapshots=snapshots,
            engine_adapter=engine_adapter,
            path=metadata.path,
        )
        for metadata in model_test_metadata
    )
    runner = unittest.TextTestRunner(verbosity=verbosity, resultclass=ModelTextTestResult)
    return runner.run(unittest.TestSuite(cases))
272
273
def get_all_model_tests(
    path: pathlib.Path,
    patterns: t.Optional[t.List[str]] = None,
    ignore_patterns: t.Optional[t.List[str]] = None,
) -> t.List[ModelTestMetadata]:
    """Collect every model test under a path, optionally filtered by patterns.

    Args:
        path: A path to search for tests.
        patterns: An optional list of patterns to filter tests by.
        ignore_patterns: An optional list of patterns to ignore.

    Returns:
        The discovered (and possibly filtered) ModelTestMetadata objects.
    """
    discovered = list(discover_model_tests(pathlib.Path(path), ignore_patterns))
    return filter_tests_by_patterns(discovered, patterns) if patterns else discovered
283
284
def run_all_model_tests(
    path: pathlib.Path,
    snapshots: t.Dict[str, Snapshot],
    engine_adapter: EngineAdapter,
    verbosity: int = 1,
    patterns: t.Optional[t.List[str]] = None,
    ignore_patterns: t.Optional[t.List[str]] = None,
) -> unittest.result.TestResult:
    """Discover and run all model tests found in path.

    Args:
        path: A path to search for tests.
        snapshots: All snapshots to use for expansion and mapping of physical locations.
        engine_adapter: The engine adapter to use.
        verbosity: The verbosity level.
        patterns: A list of patterns to match against.
        ignore_patterns: An optional list of patterns to ignore.

    Returns:
        The unittest result of running the discovered tests.
    """
    discovered = get_all_model_tests(path, patterns, ignore_patterns)
    return run_tests(discovered, snapshots, engine_adapter, verbosity)
305
306
def run_model_tests(
    tests: t.List[str],
    snapshots: t.Dict[str, Snapshot],
    engine_adapter: EngineAdapter,
    verbosity: int = 1,
    patterns: t.Optional[t.List[str]] = None,
    ignore_patterns: t.Optional[t.List[str]] = None,
) -> unittest.result.TestResult:
    """Load and run an explicit list of tests.

    Args:
        tests: Test selectors, e.g. [tests/test_orders.yaml::test_single_order].
            A selector without "::" runs every test in that file.
        snapshots: All snapshots to use for expansion and mapping of physical locations.
        engine_adapter: The engine adapter to use.
        verbosity: The verbosity level.
        patterns: A list of patterns to match against.
        ignore_patterns: An optional list of patterns to ignore.

    Returns:
        The unittest result of running the selected tests.
    """
    selected: t.List[ModelTestMetadata] = []
    for selector in tests:
        # "file.yaml::name" -> ("file.yaml", "name"); a bare file keeps test_name empty.
        filename, _, test_name = selector.partition("::")
        path = pathlib.Path(filename)
        if any(path.match(pattern) for pattern in ignore_patterns or []):
            continue
        file_tests = load_model_test_file(path)
        if test_name:
            selected.append(file_tests[test_name])
        else:
            selected.extend(file_tests.values())
    if patterns:
        selected = filter_tests_by_patterns(selected, patterns)
    return run_tests(selected, snapshots, engine_adapter, verbosity)
340
341
342def _test_fixture_name(name: str) -> str:
343    return f"{name}__fixture"
class ModelTestMetadata(sqlmesh.utils.pydantic.PydanticModel):
22class ModelTestMetadata(PydanticModel):
23    path: pathlib.Path
24    test_name: str
25    body: ruamel.yaml.comments.CommentedMap
26
27    @property
28    def fully_qualified_test_name(self) -> str:
29        return f"{self.path}::{self.test_name}"
30
31    def __hash__(self) -> int:
32        return self.fully_qualified_test_name.__hash__()
Inherited Members
pydantic.main.BaseModel
BaseModel
parse_obj
parse_raw
parse_file
from_orm
construct
copy
schema
schema_json
validate
update_forward_refs
sqlmesh.utils.pydantic.PydanticModel
Config
dict
json
missing_required_fields
extra_fields
all_fields
required_fields
class TestError(sqlmesh.utils.errors.SQLMeshError):
35class TestError(SQLMeshError):
36    """Test error"""

Test error

Inherited Members
builtins.Exception
Exception
builtins.BaseException
with_traceback
class ModelTest(unittest.case.TestCase):
 39class ModelTest(unittest.TestCase):
 40    view_names: t.List[str] = []
 41
 42    def __init__(
 43        self,
 44        body: t.Dict[str, t.Any],
 45        test_name: str,
 46        snapshots: t.Dict[str, Snapshot],
 47        engine_adapter: EngineAdapter,
 48        path: t.Optional[pathlib.Path],
 49    ) -> None:
 50        """ModelTest encapsulates a unit test for a model.
 51
 52        Args:
 53            body: A dictionary that contains test metadata like inputs and outputs.
 54            test_name: The name of the test.
 55            snapshots: All snapshots to use for expansion and mapping of physical locations.
 56            engine_adapter: The engine adapter to use.
 57            path: An optional path to the test definition yaml file
 58        """
 59        self.body = body
 60        self.path = path
 61
 62        self.test_name = test_name
 63        self.engine_adapter = engine_adapter
 64
 65        if "model" not in body:
 66            self._raise_error("Incomplete test, missing model name")
 67
 68        if "outputs" not in body:
 69            self._raise_error("Incomplete test, missing outputs")
 70
 71        self.model_name = body["model"]
 72        if self.model_name not in snapshots:
 73            self._raise_error(f"Model '{self.model_name}' was not found")
 74
 75        self.snapshot = snapshots[self.model_name]
 76
 77        inputs = self.body.get("inputs", {})
 78        for snapshot_id in self.snapshot.parents:
 79            if snapshot_id.name not in inputs:
 80                self._raise_error(f"Incomplete test, missing input for table {snapshot_id.name}")
 81
 82        self.query = self.snapshot.model.render_query(**self.body.get("vars", {}))
 83        # For tests we just use the model name for the table reference and we don't want to expand
 84        mapping = {name: _test_fixture_name(name) for name in snapshots}
 85        if mapping:
 86            self.query = exp.replace_tables(self.query, mapping)
 87
 88        self.ctes = {cte.alias: cte for cte in self.query.ctes}
 89
 90        super().__init__()
 91
 92    def setUp(self) -> None:
 93        """Load all input tables"""
 94        inputs = {name: table["rows"] for name, table in self.body.get("inputs", {}).items()}
 95        self.engine_adapter.create_schema(self.snapshot.physical_schema)
 96        for table, rows in inputs.items():
 97            df = pd.DataFrame.from_records(rows)  # noqa
 98            columns_to_types: t.Dict[str, exp.DataType] = {}
 99            for i, v in rows[0].items():
100                # convert ruamel into python
101                v = v.real if hasattr(v, "real") else v
102                columns_to_types[i] = parse_one(type(v).__name__, into=exp.DataType)  # type: ignore
103            self.engine_adapter.create_schema(table)
104            self.engine_adapter.create_view(_test_fixture_name(table), df, columns_to_types)
105
106    def tearDown(self) -> None:
107        """Drop all input tables"""
108        for table in self.body.get("inputs", {}):
109            self.engine_adapter.drop_view(table)
110
111    def assert_equal(self, df1: pd.DataFrame, df2: pd.DataFrame) -> None:
112        """Compare two DataFrames"""
113        try:
114            pd.testing.assert_frame_equal(
115                df1, df2, check_dtype=False, check_datetimelike_compat=True
116            )
117        except AssertionError as e:
118            diff = "\n".join(
119                difflib.ndiff(
120                    [str(x) for x in df1.to_dict("records")],
121                    [str(x) for x in df2.to_dict("records")],
122                )
123            )
124            e.args = (f"Data differs\n{diff}",)
125            raise e
126
127    def execute(self, query: Expression) -> pd.DataFrame:
128        """Execute the query with the engine adapter and return a DataFrame"""
129        return self.engine_adapter.fetchdf(query)
130
131    def test_ctes(self) -> None:
132        """Run CTE queries and compare output to expected output"""
133        for cte_name, value in self.body["outputs"].get("ctes", {}).items():
134            with self.subTest(cte=cte_name):
135                if cte_name not in self.ctes:
136                    self._raise_error(f"No CTE named {cte_name} found in model {self.model_name}")
137                expected_df = pd.DataFrame.from_records(value["rows"])
138                actual_df = self.execute(self.ctes[cte_name].this)
139                self.assert_equal(expected_df, actual_df)
140
141    def runTest(self) -> None:
142        self.test_ctes()
143
144        # Test model query
145        if "rows" in self.body["outputs"].get("query", {}):
146            expected_df = pd.DataFrame.from_records(self.body["outputs"]["query"]["rows"])
147            actual_df = self.execute(self.query)
148            self.assert_equal(expected_df, actual_df)
149
150    def __str__(self) -> str:
151        return f"{self.test_name} ({self.path}:{self.body.lc.line})"  # type: ignore
152
153    def _raise_error(self, msg: str) -> None:
154        raise TestError(f"{msg} at {self.path}")

A class whose instances are single test cases.

By default, the test code itself should be placed in a method named 'runTest'.

If the fixture may be used for many test cases, create as many test methods as are needed. When instantiating such a TestCase subclass, specify in the constructor arguments the name of the test method that the instance is to execute.

Test authors should subclass TestCase for their own tests. Construction and deconstruction of the test's environment ('fixture') can be implemented by overriding the 'setUp' and 'tearDown' methods respectively.

If it is necessary to override the __init__ method, the base class __init__ method must always be called. It is important that subclasses should not change the signature of their __init__ method, since instances of the classes are instantiated automatically by parts of the framework in order to be run.

When subclassing TestCase, you can set these attributes:

  • failureException: determines which exception will be raised when the instance's assertion methods fail; test methods raising this exception will be deemed to have 'failed' rather than 'errored'.
  • longMessage: determines whether long messages (including repr of objects used in assert methods) will be printed on failure in addition to any explicit message passed.
  • maxDiff: sets the maximum length of a diff in failure messages by assert methods using difflib. It is looked up as an instance attribute so can be configured by individual tests if required.
ModelTest( body: Dict[str, Any], test_name: str, snapshots: Dict[str, sqlmesh.core.snapshot.definition.Snapshot], engine_adapter: sqlmesh.core.engine_adapter.base.EngineAdapter, path: Optional[pathlib.Path])
42    def __init__(
43        self,
44        body: t.Dict[str, t.Any],
45        test_name: str,
46        snapshots: t.Dict[str, Snapshot],
47        engine_adapter: EngineAdapter,
48        path: t.Optional[pathlib.Path],
49    ) -> None:
50        """ModelTest encapsulates a unit test for a model.
51
52        Args:
53            body: A dictionary that contains test metadata like inputs and outputs.
54            test_name: The name of the test.
55            snapshots: All snapshots to use for expansion and mapping of physical locations.
56            engine_adapter: The engine adapter to use.
57            path: An optional path to the test definition yaml file
58        """
59        self.body = body
60        self.path = path
61
62        self.test_name = test_name
63        self.engine_adapter = engine_adapter
64
65        if "model" not in body:
66            self._raise_error("Incomplete test, missing model name")
67
68        if "outputs" not in body:
69            self._raise_error("Incomplete test, missing outputs")
70
71        self.model_name = body["model"]
72        if self.model_name not in snapshots:
73            self._raise_error(f"Model '{self.model_name}' was not found")
74
75        self.snapshot = snapshots[self.model_name]
76
77        inputs = self.body.get("inputs", {})
78        for snapshot_id in self.snapshot.parents:
79            if snapshot_id.name not in inputs:
80                self._raise_error(f"Incomplete test, missing input for table {snapshot_id.name}")
81
82        self.query = self.snapshot.model.render_query(**self.body.get("vars", {}))
83        # For tests we just use the model name for the table reference and we don't want to expand
84        mapping = {name: _test_fixture_name(name) for name in snapshots}
85        if mapping:
86            self.query = exp.replace_tables(self.query, mapping)
87
88        self.ctes = {cte.alias: cte for cte in self.query.ctes}
89
90        super().__init__()

ModelTest encapsulates a unit test for a model.

Arguments:
  • body: A dictionary that contains test metadata like inputs and outputs.
  • test_name: The name of the test.
  • snapshots: All snapshots to use for expansion and mapping of physical locations.
  • engine_adapter: The engine adapter to use.
  • path: An optional path to the test definition yaml file
def setUp(self) -> None:
 92    def setUp(self) -> None:
 93        """Load all input tables"""
 94        inputs = {name: table["rows"] for name, table in self.body.get("inputs", {}).items()}
 95        self.engine_adapter.create_schema(self.snapshot.physical_schema)
 96        for table, rows in inputs.items():
 97            df = pd.DataFrame.from_records(rows)  # noqa
 98            columns_to_types: t.Dict[str, exp.DataType] = {}
 99            for i, v in rows[0].items():
100                # convert ruamel into python
101                v = v.real if hasattr(v, "real") else v
102                columns_to_types[i] = parse_one(type(v).__name__, into=exp.DataType)  # type: ignore
103            self.engine_adapter.create_schema(table)
104            self.engine_adapter.create_view(_test_fixture_name(table), df, columns_to_types)

Load all input tables

def tearDown(self) -> None:
106    def tearDown(self) -> None:
107        """Drop all input tables"""
108        for table in self.body.get("inputs", {}):
109            self.engine_adapter.drop_view(table)

Drop all input tables

def assert_equal( self, df1: pandas.core.frame.DataFrame, df2: pandas.core.frame.DataFrame) -> None:
111    def assert_equal(self, df1: pd.DataFrame, df2: pd.DataFrame) -> None:
112        """Compare two DataFrames"""
113        try:
114            pd.testing.assert_frame_equal(
115                df1, df2, check_dtype=False, check_datetimelike_compat=True
116            )
117        except AssertionError as e:
118            diff = "\n".join(
119                difflib.ndiff(
120                    [str(x) for x in df1.to_dict("records")],
121                    [str(x) for x in df2.to_dict("records")],
122                )
123            )
124            e.args = (f"Data differs\n{diff}",)
125            raise e

Compare two DataFrames

def execute( self, query: sqlglot.expressions.Expression) -> pandas.core.frame.DataFrame:
127    def execute(self, query: Expression) -> pd.DataFrame:
128        """Execute the query with the engine adapter and return a DataFrame"""
129        return self.engine_adapter.fetchdf(query)

Execute the query with the engine adapter and return a DataFrame

def test_ctes(self) -> None:
131    def test_ctes(self) -> None:
132        """Run CTE queries and compare output to expected output"""
133        for cte_name, value in self.body["outputs"].get("ctes", {}).items():
134            with self.subTest(cte=cte_name):
135                if cte_name not in self.ctes:
136                    self._raise_error(f"No CTE named {cte_name} found in model {self.model_name}")
137                expected_df = pd.DataFrame.from_records(value["rows"])
138                actual_df = self.execute(self.ctes[cte_name].this)
139                self.assert_equal(expected_df, actual_df)

Run CTE queries and compare output to expected output

def runTest(self) -> None:
141    def runTest(self) -> None:
142        self.test_ctes()
143
144        # Test model query
145        if "rows" in self.body["outputs"].get("query", {}):
146            expected_df = pd.DataFrame.from_records(self.body["outputs"]["query"]["rows"])
147            actual_df = self.execute(self.query)
148            self.assert_equal(expected_df, actual_df)
Inherited Members
unittest.case.TestCase
addTypeEqualityFunc
addCleanup
addClassCleanup
setUpClass
tearDownClass
countTestCases
defaultTestResult
shortDescription
id
subTest
run
doCleanups
doClassCleanups
debug
skipTest
fail
assertFalse
assertTrue
assertRaises
assertWarns
assertLogs
assertNoLogs
assertEqual
assertNotEqual
assertAlmostEqual
assertNotAlmostEqual
assertSequenceEqual
assertListEqual
assertTupleEqual
assertSetEqual
assertIn
assertNotIn
assertIs
assertIsNot
assertDictEqual
assertDictContainsSubset
assertCountEqual
assertMultiLineEqual
assertLess
assertLessEqual
assertGreater
assertGreaterEqual
assertIsNone
assertIsNotNone
assertIsInstance
assertNotIsInstance
assertRaisesRegex
assertWarnsRegex
assertRegex
assertNotRegex
failUnlessRaises
failIf
assertRaisesRegexp
assertRegexpMatches
assertNotRegexpMatches
failUnlessEqual
assertEquals
failIfEqual
assertNotEquals
failUnlessAlmostEqual
assertAlmostEquals
failIfAlmostEqual
assertNotAlmostEquals
failUnless
assert_
class ModelTextTestResult(unittest.runner.TextTestResult):
157class ModelTextTestResult(unittest.TextTestResult):
158    def addFailure(
159        self,
160        test: unittest.TestCase,
161        err: t.Union[
162            t.Tuple[t.Type[BaseException], BaseException, types.TracebackType],
163            t.Tuple[None, None, None],
164        ],
165    ) -> None:
166        """Called when the test case test signals a failure.
167
168        The traceback is suppressed because it is redundant and not useful.
169
170        Args:
171            test: The test case.
172            err: A tuple of the form returned by sys.exc_info(), i.e., (type, value, traceback).
173        """
174        exctype, value, tb = err
175        return super().addFailure(test, (exctype, value, None))  # type: ignore

A test result class that can print formatted text results to a stream.

Used by TextTestRunner.

def addFailure( self, test: unittest.case.TestCase, err: Union[Tuple[Type[BaseException], BaseException, traceback], Tuple[NoneType, NoneType, NoneType]]) -> None:
158    def addFailure(
159        self,
160        test: unittest.TestCase,
161        err: t.Union[
162            t.Tuple[t.Type[BaseException], BaseException, types.TracebackType],
163            t.Tuple[None, None, None],
164        ],
165    ) -> None:
166        """Called when the test case test signals a failure.
167
168        The traceback is suppressed because it is redundant and not useful.
169
170        Args:
171            test: The test case.
172            err: A tuple of the form returned by sys.exc_info(), i.e., (type, value, traceback).
173        """
174        exctype, value, tb = err
175        return super().addFailure(test, (exctype, value, None))  # type: ignore

Called when the test case test signals a failure.

The traceback is suppressed because it is redundant and not useful.

Arguments:
  • test: The test case.
  • err: A tuple of the form returned by sys.exc_info(), i.e., (type, value, traceback).
Inherited Members
unittest.runner.TextTestResult
TextTestResult
getDescription
startTest
addSuccess
addError
addSkip
addExpectedFailure
addUnexpectedSuccess
printErrors
printErrorList
unittest.result.TestResult
startTestRun
stopTest
stopTestRun
addSubTest
wasSuccessful
stop
def load_model_test_file(path: pathlib.Path) -> Dict[str, sqlmesh.core.test.ModelTestMetadata]:
178def load_model_test_file(
179    path: pathlib.Path,
180) -> t.Dict[str, ModelTestMetadata]:
181    """Load a single model test file.
182
183    Args:
184        path: The path to the test file
185
186    returns:
187        A list of ModelTestMetadata named tuples.
188    """
189    model_test_metadata = {}
190    contents = yaml_load(path)
191
192    for test_name, value in contents.items():
193        model_test_metadata[test_name] = ModelTestMetadata(
194            path=path, test_name=test_name, body=value
195        )
196    return model_test_metadata

Load a single model test file.

Arguments:
  • path: The path to the test file

Returns: A dictionary mapping each test name in the file to its ModelTestMetadata.

def discover_model_tests( path: pathlib.Path, ignore_patterns: Optional[List[str]] = None) -> Generator[sqlmesh.core.test.ModelTestMetadata, NoneType, NoneType]:
199def discover_model_tests(
200    path: pathlib.Path, ignore_patterns: t.Optional[t.List[str]] = None
201) -> t.Generator[ModelTestMetadata, None, None]:
202    """Discover model tests.
203
204    Model tests are defined in YAML files and contain the inputs and outputs used to test model queries.
205
206    Args:
207        path: A path to search for tests.
208        ignore_patterns: An optional list of patterns to ignore.
209
210    Returns:
211        A list of ModelTestMetadata named tuples.
212    """
213    search_path = pathlib.Path(path)
214
215    for yaml_file in itertools.chain(
216        search_path.glob("**/test*.yaml"),
217        search_path.glob("**/test*.yml"),
218    ):
219        for ignore_pattern in ignore_patterns or []:
220            if yaml_file.match(ignore_pattern):
221                break
222        else:
223            for model_test_metadata in load_model_test_file(yaml_file).values():
224                yield model_test_metadata

Discover model tests.

Model tests are defined in YAML files and contain the inputs and outputs used to test model queries.

Arguments:
  • path: A path to search for tests.
  • ignore_patterns: An optional list of patterns to ignore.
Returns:

A list of ModelTestMetadata named tuples.

def filter_tests_by_patterns( tests: List[sqlmesh.core.test.ModelTestMetadata], patterns: List[str]) -> List[sqlmesh.core.test.ModelTestMetadata]:
def filter_tests_by_patterns(
    tests: t.List[ModelTestMetadata], patterns: t.List[str]
) -> t.List[ModelTestMetadata]:
    """Keep only the tests whose fully qualified name matches some pattern.

    A pattern containing ``*`` is matched as a case-sensitive glob; every
    pattern can also match via plain substring containment.

    Args:
        tests: A list of ModelTestMetadata objects to filter.
        patterns: A list of patterns to match against.

    Returns:
        A deduplicated list of the matching ModelTestMetadata objects.
    """

    def _matches(name: str, pattern: str) -> bool:
        if "*" in pattern and fnmatch.fnmatchcase(name, pattern):
            return True
        # Substring containment applies to every pattern, glob-style or not.
        return pattern in name

    return unique(
        test
        for test in tests
        for pattern in patterns
        if _matches(test.fully_qualified_test_name, pattern)
    )

Filter out tests whose filename or name does not match a pattern.

Arguments:
  • tests: A list of ModelTestMetadata named tuples to match.
  • patterns: A list of patterns to match against.
Returns:

A list of the matching ModelTestMetadata objects.

def run_tests( model_test_metadata: List[sqlmesh.core.test.ModelTestMetadata], snapshots: Dict[str, sqlmesh.core.snapshot.definition.Snapshot], engine_adapter: sqlmesh.core.engine_adapter.base.EngineAdapter, verbosity: int = 1) -> unittest.result.TestResult:
def run_tests(
    model_test_metadata: t.List[ModelTestMetadata],
    snapshots: t.Dict[str, Snapshot],
    engine_adapter: EngineAdapter,
    verbosity: int = 1,
) -> unittest.result.TestResult:
    """Create a test suite of ModelTest objects and run it.

    Args:
        model_test_metadata: A list of ModelTestMetadata objects, one per test to run.
        snapshots: All snapshots to use for expansion and mapping of physical locations.
        engine_adapter: The engine adapter to use.
        verbosity: The verbosity level passed to the unittest runner.

    Returns:
        The unittest TestResult produced by running the suite.
    """
    suite = unittest.TestSuite(
        ModelTest(
            body=metadata.body,
            test_name=metadata.test_name,
            snapshots=snapshots,
            engine_adapter=engine_adapter,
            path=metadata.path,
        )
        for metadata in model_test_metadata
    )
    # ModelTextTestResult customizes how failures are reported by the text runner.
    return unittest.TextTestRunner(verbosity=verbosity, resultclass=ModelTextTestResult).run(suite)

Create a test suite of ModelTest objects and run it.

Arguments:
  • model_test_metadata: A list of ModelTestMetadata named tuples.
  • snapshots: All snapshots to use for expansion and mapping of physical locations.
  • engine_adapter: The engine adapter to use.
  • verbosity: The verbosity level.
def get_all_model_tests( path: pathlib.Path, patterns: Optional[List[str]] = None, ignore_patterns: Optional[List[str]] = None) -> List[sqlmesh.core.test.ModelTestMetadata]:
def get_all_model_tests(
    path: pathlib.Path,
    patterns: t.Optional[t.List[str]] = None,
    ignore_patterns: t.Optional[t.List[str]] = None,
) -> t.List[ModelTestMetadata]:
    """Discover model tests under a path, optionally filtered by name patterns.

    Args:
        path: A path to search for tests.
        patterns: An optional list of patterns; when given, only matching tests are kept.
        ignore_patterns: An optional list of patterns to ignore during discovery.

    Returns:
        A list of ModelTestMetadata objects.
    """
    model_test_metadatas = list(discover_model_tests(pathlib.Path(path), ignore_patterns))
    if patterns:
        model_test_metadatas = filter_tests_by_patterns(model_test_metadatas, patterns)
    return model_test_metadatas
def run_all_model_tests( path: pathlib.Path, snapshots: Dict[str, sqlmesh.core.snapshot.definition.Snapshot], engine_adapter: sqlmesh.core.engine_adapter.base.EngineAdapter, verbosity: int = 1, patterns: Optional[List[str]] = None, ignore_patterns: Optional[List[str]] = None) -> unittest.result.TestResult:
def run_all_model_tests(
    path: pathlib.Path,
    snapshots: t.Dict[str, Snapshot],
    engine_adapter: EngineAdapter,
    verbosity: int = 1,
    patterns: t.Optional[t.List[str]] = None,
    ignore_patterns: t.Optional[t.List[str]] = None,
) -> unittest.result.TestResult:
    """Discover and run every model test found under a path.

    Args:
        path: A path to search for tests.
        snapshots: All snapshots to use for expansion and mapping of physical locations.
        engine_adapter: The engine adapter to use.
        verbosity: The verbosity level.
        patterns: An optional list of patterns used to select tests.
        ignore_patterns: An optional list of patterns to ignore during discovery.
    """
    discovered = get_all_model_tests(path, patterns, ignore_patterns)
    return run_tests(discovered, snapshots, engine_adapter, verbosity)

Discover and run all model tests found in path.

Arguments:
  • path: A path to search for tests.
  • snapshots: All snapshots to use for expansion and mapping of physical locations.
  • engine_adapter: The engine adapter to use.
  • verbosity: The verbosity level.
  • patterns: A list of patterns to match against.
  • ignore_patterns: An optional list of patterns to ignore.
def run_model_tests( tests: List[str], snapshots: Dict[str, sqlmesh.core.snapshot.definition.Snapshot], engine_adapter: sqlmesh.core.engine_adapter.base.EngineAdapter, verbosity: int = 1, patterns: Optional[List[str]] = None, ignore_patterns: Optional[List[str]] = None) -> unittest.result.TestResult:
def run_model_tests(
    tests: t.List[str],
    snapshots: t.Dict[str, Snapshot],
    engine_adapter: EngineAdapter,
    verbosity: int = 1,
    patterns: t.Optional[t.List[str]] = None,
    ignore_patterns: t.Optional[t.List[str]] = None,
) -> unittest.result.TestResult:
    """Load and run tests.

    Args:
        tests: A list of tests to run, e.g. [tests/test_orders.yaml::test_single_order]
        snapshots: All snapshots to use for expansion and mapping of physical locations.
        engine_adapter: The engine adapter to use.
        verbosity: The verbosity level.
        patterns: A list of patterns to match against.
        ignore_patterns: An optional list of patterns to ignore.

    Returns:
        The unittest TestResult produced by running the selected tests.
    """
    loaded_tests = []
    for test in tests:
        # "path/to/file.yaml::test_name" selects one test; a bare path selects
        # every test in the file.
        filename, _, test_name = test.partition("::")
        path = pathlib.Path(filename)
        if any(path.match(ignore_pattern) for ignore_pattern in ignore_patterns or []):
            continue
        if test_name:
            loaded_tests.append(load_model_test_file(path)[test_name])
        else:
            loaded_tests.extend(load_model_test_file(path).values())
    if patterns:
        loaded_tests = filter_tests_by_patterns(loaded_tests, patterns)
    return run_tests(loaded_tests, snapshots, engine_adapter, verbosity)

Load and run tests.

Arguments:
  • tests: A list of tests to run, e.g. [tests/test_orders.yaml::test_single_order]
  • snapshots: All snapshots to use for expansion and mapping of physical locations.
  • engine_adapter: The engine adapter to use.
  • verbosity: The verbosity level.
  • patterns: A list of patterns to match against.
  • ignore_patterns: An optional list of patterns to ignore.