"""Consoles for displaying information to the user and collecting their input.

Defines the abstract :class:`Console` interface plus the rich-based terminal
implementation and a helper for composing ipywidgets layouts.
"""
from __future__ import annotations

import abc
import datetime
import typing as t
import unittest
import uuid

from hyperscript import h
from rich.console import Console as RichConsole
from rich.progress import BarColumn, Progress, TaskID, TextColumn, TimeElapsedColumn
from rich.prompt import Confirm, Prompt
from rich.status import Status
from rich.syntax import Syntax
from rich.tree import Tree

from sqlmesh.core.snapshot import Snapshot, SnapshotChangeCategory
from sqlmesh.core.test import ModelTest
from sqlmesh.utils import rich as srich
from sqlmesh.utils.date import to_date

if t.TYPE_CHECKING:
    import ipywidgets as widgets

    from sqlmesh.core.context_diff import ContextDiff
    from sqlmesh.core.plan import Plan

    LayoutWidget = t.TypeVar("LayoutWidget", bound=t.Union[widgets.VBox, widgets.HBox])


# Human-readable labels for each snapshot change category, used in prompts
# and categorization summaries.
SNAPSHOT_CHANGE_CATEGORY_STR = {
    SnapshotChangeCategory.BREAKING: "Breaking",
    SnapshotChangeCategory.NON_BREAKING: "Non-breaking",
    SnapshotChangeCategory.FORWARD_ONLY: "Forward-only",
}


class Console(abc.ABC):
    """Abstract base class for defining classes used for displaying information to the user and also interact
    with them when their input is needed"""

    @abc.abstractmethod
    def start_snapshot_progress(self, snapshot_name: str, total_batches: int) -> None:
        """Indicates that a new load progress has begun."""

    @abc.abstractmethod
    def update_snapshot_progress(self, snapshot_name: str, num_batches: int) -> None:
        """Update snapshot progress."""

    @abc.abstractmethod
    def stop_snapshot_progress(self, success: bool = True) -> None:
        """Stop the load progress"""

    @abc.abstractmethod
    def start_promotion_progress(self, environment: str, total_tasks: int) -> None:
        """Indicates that a new promotion progress has begun."""

    @abc.abstractmethod
    def update_promotion_progress(self, num_tasks: int) -> None:
        """Update promotion progress."""

    @abc.abstractmethod
    def stop_promotion_progress(self, success: bool = True) -> None:
        """Stop the promotion progress"""

    @abc.abstractmethod
    def show_model_difference_summary(
        self, context_diff: ContextDiff, detailed: bool = False
    ) -> None:
        """Displays a summary of differences for the given models"""

    @abc.abstractmethod
    def plan(self, plan: Plan, auto_apply: bool) -> None:
        """The main plan flow.

        The console should present the user with choices on how to backfill and version the snapshots
        of a plan.

        Args:
            plan: The plan to make choices for.
            auto_apply: Whether to automatically apply the plan after all choices have been made.
        """

    @abc.abstractmethod
    def log_test_results(
        self, result: unittest.result.TestResult, output: str, target_dialect: str
    ) -> None:
        """Display the test result and output.

        Args:
            result: The unittest test result that contains metrics like num success, fails, etc.
            output: The generated output from the unittest.
            target_dialect: The dialect that tests were run against. Assumes all tests run against the same dialect.
        """

    @abc.abstractmethod
    def show_sql(self, sql: str) -> None:
        """Display to the user SQL"""

    @abc.abstractmethod
    def log_status_update(self, message: str) -> None:
        """Display general status update to the user"""

    @abc.abstractmethod
    def log_error(self, message: str) -> None:
        """Display error info to the user"""

    @abc.abstractmethod
    def log_success(self, message: str) -> None:
        """Display a general successful message to the user"""

    @abc.abstractmethod
    def loading_start(self, message: t.Optional[str] = None) -> uuid.UUID:
        """Starts loading and returns a unique ID that can be used to stop the loading.
        Optionally can display a message"""

    @abc.abstractmethod
    def loading_stop(self, id: uuid.UUID) -> None:
        """Stop loading for the given id"""


class TerminalConsole(Console):
    """A rich based implementation of the console"""

    def __init__(self, console: t.Optional[RichConsole] = None, **kwargs: t.Any) -> None:
        self.console: RichConsole = console or srich.console
        # Progress bars are created lazily on first start_* call and torn down
        # again on stop_* so consecutive plans get fresh displays.
        self.evaluation_progress: t.Optional[Progress] = None
        # snapshot name -> (rich task id, total batch count)
        self.evaluation_tasks: t.Dict[str, t.Tuple[TaskID, int]] = {}
        self.promotion_progress: t.Optional[Progress] = None
        self.promotion_task: t.Optional[TaskID] = None
        self.loading_status: t.Dict[uuid.UUID, Status] = {}

    def _print(self, value: t.Any, **kwargs: t.Any) -> None:
        # Fix: forward **kwargs (e.g. style="green" from log_test_results) to the
        # underlying rich console; previously they were silently dropped, unlike
        # the DatabricksMagicConsole override which honors them.
        self.console.print(value, **kwargs)

    def _prompt(self, message: str, **kwargs: t.Any) -> t.Any:
        return Prompt.ask(message, console=self.console, **kwargs)

    def _confirm(self, message: str, **kwargs: t.Any) -> bool:
        return Confirm.ask(message, console=self.console, **kwargs)

    def start_snapshot_progress(self, snapshot_name: str, total_batches: int) -> None:
        """Indicates that a new load progress has begun."""
        if not self.evaluation_progress:
            self.evaluation_progress = Progress(
                TextColumn("[bold blue]{task.fields[snapshot_name]}", justify="right"),
                BarColumn(bar_width=40),
                "[progress.percentage]{task.percentage:>3.1f}%",
                "•",
                srich.SchedulerBatchColumn(),
                "•",
                TimeElapsedColumn(),
                console=self.console,
            )
            self.evaluation_progress.start()
            self.evaluation_tasks = {}
        self.evaluation_tasks[snapshot_name] = (
            self.evaluation_progress.add_task(
                f"Running {snapshot_name}...",
                snapshot_name=snapshot_name,
                total=total_batches,
            ),
            total_batches,
        )

    def update_snapshot_progress(self, snapshot_name: str, num_batches: int) -> None:
        """Update snapshot progress."""
        if self.evaluation_progress and self.evaluation_tasks:
            task_id = self.evaluation_tasks[snapshot_name][0]
            self.evaluation_progress.update(task_id, refresh=True, advance=num_batches)

    def stop_snapshot_progress(self, success: bool = True) -> None:
        """Stop the load progress"""
        self.evaluation_tasks = {}
        if self.evaluation_progress:
            self.evaluation_progress.stop()
            self.evaluation_progress = None
            if success:
                self.log_success("All model batches have been executed successfully")

    def start_promotion_progress(self, environment: str, total_tasks: int) -> None:
        """Indicates that a new promotion progress has begun."""
        if self.promotion_progress is None:
            self.promotion_progress = Progress(
                TextColumn(f"[bold blue]Virtually Updating '{environment}'", justify="right"),
                BarColumn(bar_width=40),
                "[progress.percentage]{task.percentage:>3.1f}%",
                "•",
                TimeElapsedColumn(),
                console=self.console,
            )
            self.promotion_progress.start()
            self.promotion_task = self.promotion_progress.add_task(
                f"Virtually Updating {environment}...",
                total=total_tasks,
            )

    def update_promotion_progress(self, num_tasks: int) -> None:
        """Update promotion progress."""
        if self.promotion_progress is not None and self.promotion_task is not None:
            self.promotion_progress.update(self.promotion_task, refresh=True, advance=num_tasks)

    def stop_promotion_progress(self, success: bool = True) -> None:
        """Stop the promotion progress"""
        self.promotion_task = None
        if self.promotion_progress is not None:
            self.promotion_progress.stop()
            self.promotion_progress = None
            if success:
                self.log_success("The target environment has been updated successfully")

    def show_model_difference_summary(
        self, context_diff: ContextDiff, detailed: bool = False
    ) -> None:
        """Shows a summary of the differences.

        Args:
            context_diff: The context diff to use to print the summary
            detailed: Show the actual SQL differences if True.
        """
        if context_diff.is_new_environment:
            self._print(
                Tree(
                    f"[bold]New environment `{context_diff.environment}` will be created from `{context_diff.create_from}`"
                )
            )
            if not context_diff.has_snapshot_changes:
                return

        if not context_diff.has_changes:
            self._print(Tree(f"[bold]No differences when compared to `{context_diff.environment}`"))
            return

        tree = Tree(f"[bold]Summary of differences against `{context_diff.environment}`:")

        if context_diff.added:
            added_tree = Tree("[bold][added]Added Models:")
            for model in context_diff.added:
                added_tree.add(f"[added]{model}")
            tree.add(added_tree)

        if context_diff.removed:
            removed_tree = Tree("[bold][removed]Removed Models:")
            for model in context_diff.removed:
                removed_tree.add(f"[removed]{model}")
            tree.add(removed_tree)

        if context_diff.modified_snapshots:
            direct = Tree("[bold][direct]Directly Modified:")
            indirect = Tree("[bold][indirect]Indirectly Modified:")
            metadata = Tree("[bold][metadata]Metadata Updated:")
            for model in context_diff.modified_snapshots:
                if context_diff.directly_modified(model):
                    direct.add(
                        Syntax(f"{model}\n{context_diff.text_diff(model)}", "sql")
                        if detailed
                        else f"[direct]{model}"
                    )
                elif context_diff.indirectly_modified(model):
                    indirect.add(f"[indirect]{model}")
                elif context_diff.metadata_updated(model):
                    metadata.add(f"[metadata]{model}")
            # Only attach sub-trees that actually collected entries.
            if direct.children:
                tree.add(direct)
            if indirect.children:
                tree.add(indirect)
            if metadata.children:
                tree.add(metadata)
        self._print(tree)

    def plan(self, plan: Plan, auto_apply: bool) -> None:
        """The main plan flow.

        The console should present the user with choices on how to backfill and version the snapshots
        of a plan.

        Args:
            plan: The plan to make choices for.
            auto_apply: Whether to automatically apply the plan after all choices have been made.
        """
        self._prompt_categorize(plan, auto_apply)
        self._show_options_after_categorization(plan, auto_apply)

        if auto_apply:
            plan.apply()

    def _show_options_after_categorization(self, plan: Plan, auto_apply: bool) -> None:
        if plan.requires_backfill:
            self._show_missing_dates(plan)
            self._prompt_backfill(plan, auto_apply)
        elif plan.context_diff.has_changes and not auto_apply:
            self._prompt_promote(plan)

    def _prompt_categorize(self, plan: Plan, auto_apply: bool) -> None:
        """Get the user's change category for the directly modified models"""
        self.show_model_difference_summary(plan.context_diff)

        self._show_categorized_snapshots(plan)

        for snapshot in plan.uncategorized:
            self._print(Syntax(plan.context_diff.text_diff(snapshot.name), "sql"))
            tree = Tree(f"[bold][direct]Directly Modified: {snapshot.name}")
            indirect_tree = None

            for child in plan.indirectly_modified[snapshot.name]:
                if not indirect_tree:
                    indirect_tree = Tree("[indirect]Indirectly Modified Children:")
                    tree.add(indirect_tree)
                indirect_tree.add(f"[indirect]{child}")
            self._print(tree)
            self._get_snapshot_change_category(snapshot, plan, auto_apply)

    def _show_categorized_snapshots(self, plan: Plan) -> None:
        context_diff = plan.context_diff
        for snapshot in plan.categorized:
            if not context_diff.directly_modified(snapshot.name):
                continue

            category_str = SNAPSHOT_CHANGE_CATEGORY_STR[plan.snapshot_change_category(snapshot)]
            tree = Tree(f"[bold][direct]Directly Modified: {snapshot.name} ({category_str})")
            syntax_diff = Syntax(context_diff.text_diff(snapshot.name), "sql")
            indirect_tree = None
            for child in plan.indirectly_modified[snapshot.name]:
                if not indirect_tree:
                    indirect_tree = Tree("[indirect]Indirectly Modified Children:")
                    tree.add(indirect_tree)
                indirect_tree.add(f"[indirect]{child}")
            self._print(syntax_diff)
            self._print(tree)

    def _show_missing_dates(self, plan: Plan) -> None:
        """Displays the models with missing dates"""
        if not plan.missing_intervals:
            return
        backfill = Tree("[bold]Models needing backfill (missing dates):")
        for missing in plan.missing_intervals:
            backfill.add(f"{missing.snapshot_name}: {missing.format_missing_range()}")
        self._print(backfill)

    def _prompt_backfill(self, plan: Plan, auto_apply: bool) -> None:
        # Forward-only plans in dev environments only preview; everything else backfills.
        is_forward_only_dev = plan.is_dev and plan.forward_only
        backfill_or_preview = "preview" if is_forward_only_dev else "backfill"

        if plan.is_start_and_end_allowed:
            if not plan.override_start:
                blank_meaning = (
                    "to preview starting from yesterday"
                    if is_forward_only_dev
                    else "for the beginning of history"
                )
                start = self._prompt(
                    f"Enter the {backfill_or_preview} start date (eg. '1 year', '2020-01-01') or blank {blank_meaning}",
                )
                if start:
                    plan.start = start

            if not plan.override_end:
                end = self._prompt(
                    f"Enter the {backfill_or_preview} end date (eg. '1 month ago', '2020-01-01') or blank to {backfill_or_preview} up until now",
                )
                if end:
                    plan.end = end

        if not auto_apply and self._confirm(f"Apply - {backfill_or_preview.capitalize()} Tables"):
            plan.apply()

    def _prompt_promote(self, plan: Plan) -> None:
        if self._confirm(
            "Apply - Virtual Update",
        ):
            plan.apply()

    def log_test_results(
        self, result: unittest.result.TestResult, output: str, target_dialect: str
    ) -> None:
        divider_length = 70
        if result.wasSuccessful():
            self._print("=" * divider_length)
            self._print(
                f"Successfully Ran {str(result.testsRun)} tests against {target_dialect}",
                style="green",
            )
            self._print("-" * divider_length)
        else:
            self._print("-" * divider_length)
            self._print("Test Failure Summary")
            self._print("=" * divider_length)
            self._print(
                f"Num Successful Tests: {result.testsRun - len(result.failures) - len(result.errors)}"
            )
            for test, _ in result.failures + result.errors:
                if isinstance(test, ModelTest):
                    self._print(f"Failure Test: {test.model_name} {test.test_name}")
            self._print("=" * divider_length)
            self._print(output)

    def show_sql(self, sql: str) -> None:
        self._print(Syntax(sql, "sql"))

    def log_status_update(self, message: str) -> None:
        self._print(message)

    def log_error(self, message: str) -> None:
        self._print(f"[red]{message}[/red]")

    def log_success(self, message: str) -> None:
        self._print(f"\n[green]{message}[/green]\n")

    def loading_start(self, message: t.Optional[str] = None) -> uuid.UUID:
        id = uuid.uuid4()
        self.loading_status[id] = Status(message or "", console=self.console, spinner="line")
        self.loading_status[id].start()
        return id

    def loading_stop(self, id: uuid.UUID) -> None:
        self.loading_status[id].stop()
        del self.loading_status[id]

    def _get_snapshot_change_category(
        self, snapshot: Snapshot, plan: Plan, auto_apply: bool
    ) -> None:
        choices = self._snapshot_change_choices(snapshot)
        response = self._prompt(
            "\n".join([f"[{i+1}] {choice}" for i, choice in enumerate(choices.values())]),
            show_choices=False,
            choices=[f"{i+1}" for i in range(len(choices))],
        )
        # Map the 1-based menu answer back onto the category key.
        choice = list(choices)[int(response) - 1]
        plan.set_choice(snapshot, choice)

    def _snapshot_change_choices(
        self, snapshot: Snapshot, use_rich_formatting: bool = True
    ) -> t.Dict[SnapshotChangeCategory, str]:
        """Build the category -> prompt-label mapping for a modified snapshot.

        Args:
            snapshot: The snapshot being categorized.
            use_rich_formatting: Wrap names in rich markup tags when True
                (disabled by widget-based consoles).
        """
        direct = snapshot.name
        if use_rich_formatting:
            direct = f"[direct]{direct}[/direct]"
        indirect = "indirectly modified children"
        if use_rich_formatting:
            indirect = f"[indirect]{indirect}[/indirect]"
        if snapshot.is_view_kind:
            choices = {
                SnapshotChangeCategory.BREAKING: f"Update {direct} and backfill {indirect}",
                SnapshotChangeCategory.NON_BREAKING: f"Update {direct} but don't backfill {indirect}",
            }
        elif snapshot.is_embedded_kind:
            choices = {
                SnapshotChangeCategory.BREAKING: f"Backfill {indirect}",
                SnapshotChangeCategory.NON_BREAKING: f"Don't backfill {indirect}",
            }
        else:
            choices = {
                SnapshotChangeCategory.BREAKING: f"Backfill {direct} and {indirect}",
                SnapshotChangeCategory.NON_BREAKING: f"Backfill {direct} but not {indirect}",
            }
        labeled_choices = {
            k: f"[{SNAPSHOT_CHANGE_CATEGORY_STR[k]}] {v}" for k, v in choices.items()
        }
        return labeled_choices


def add_to_layout_widget(target_widget: LayoutWidget, *widgets: widgets.Widget) -> LayoutWidget:
    """Helper function to add a widget to a layout widget

    Args:
        target_widget: The layout widget to add the other widget(s) to
        *widgets: The widgets to add to the layout widget

    Returns:
        The layout widget with the children added
    """
    target_widget.children += tuple(widgets)
    return target_widget
class NotebookMagicConsole(TerminalConsole):
    """
    Console to be used when using the magic notebook interface (`%<command>`).
    Generally reuses the Terminal console when possible by either directly outputting what it provides
    or capturing it and converting it into a widget.
    """

    def __init__(
        self,
        display: t.Optional[t.Callable] = None,
        console: t.Optional[RichConsole] = None,
        **kwargs: t.Any,
    ) -> None:
        import ipywidgets as widgets
        from IPython.display import display as ipython_display

        super().__init__(console, **kwargs)
        # Prefer a user-supplied display callable, then one injected into the
        # IPython namespace (e.g. by Databricks), falling back to IPython's own.
        # NOTE(review): get_ipython is not imported here — presumably provided
        # by the IPython runtime; verify in a plain-Python context.
        self.display = display or get_ipython().user_ns.get("display", ipython_display)  # type: ignore
        self.missing_dates_output = widgets.Output()
        self.dynamic_options_after_categorization_output = widgets.VBox()

    def _show_missing_dates(self, plan: Plan) -> None:
        # Clear and re-render the missing-dates widget instead of printing,
        # capturing the parent class' output inside the Output widget.
        self._add_to_dynamic_options(self.missing_dates_output)
        self.missing_dates_output.outputs = ()
        with self.missing_dates_output:
            super()._show_missing_dates(plan)

    def _apply(self, button: widgets.Button) -> None:
        """Click handler for the Apply button; disables it to prevent re-apply."""
        button.disabled = True
        with button.output:
            button.plan.apply()

    def _prompt_promote(self, plan: Plan) -> None:
        import ipywidgets as widgets

        button = widgets.Button(
            description="Apply - Virtual Update",
            disabled=False,
            button_style="success",
            # Auto will make the button really large.
            # Likely changing this soon anyways to be just `Apply` with description above
            layout={"width": "10rem"},
        )
        self._add_to_dynamic_options(button)
        output = widgets.Output()
        self._add_to_dynamic_options(output)

        # Stash the plan and output on the button so _apply can reach them.
        button.plan = plan
        button.on_click(self._apply)
        button.output = output

    def _prompt_backfill(self, plan: Plan, auto_apply: bool) -> None:
        """Render date pickers (and an Apply button unless auto-applying) for backfill."""
        import ipywidgets as widgets

        prompt = widgets.VBox()

        backfill_or_preview = "Preview" if plan.is_dev and plan.forward_only else "Backfill"

        def _date_picker(
            plan: Plan, value: t.Any, on_change: t.Callable, disabled: bool = False
        ) -> widgets.DatePicker:
            # Small factory so both start and end pickers share configuration.
            picker = widgets.DatePicker(
                disabled=disabled,
                value=value,
                layout={"width": "auto"},
            )

            picker.observe(on_change, "value")
            return picker

        def _checkbox(description: str, value: bool, on_change: t.Callable) -> widgets.Checkbox:
            checkbox = widgets.Checkbox(
                value=value,
                description=description,
                disabled=False,
                indent=False,
            )

            checkbox.observe(on_change, "value")
            return checkbox

        def start_change_callback(change: t.Dict[str, datetime.datetime]) -> None:
            # Re-render the option widgets since changing dates can change
            # whether a backfill is required.
            plan.start = change["new"]
            self._show_options_after_categorization(plan, auto_apply)

        def end_change_callback(change: t.Dict[str, datetime.datetime]) -> None:
            plan.end = change["new"]
            self._show_options_after_categorization(plan, auto_apply)

        if plan.is_start_and_end_allowed:
            add_to_layout_widget(
                prompt,
                widgets.HBox(
                    [
                        widgets.Label(
                            f"Start {backfill_or_preview} Date:", layout={"width": "8rem"}
                        ),
                        _date_picker(plan, to_date(plan.start), start_change_callback),
                    ]
                ),
            )

            add_to_layout_widget(
                prompt,
                widgets.HBox(
                    [
                        widgets.Label(f"End {backfill_or_preview} Date:", layout={"width": "8rem"}),
                        _date_picker(
                            plan,
                            to_date(plan.end),
                            end_change_callback,
                        ),
                    ]
                ),
            )

        self._add_to_dynamic_options(prompt)

        if not auto_apply:
            button = widgets.Button(
                description=f"Apply - {backfill_or_preview} Tables",
                disabled=False,
                button_style="success",
            )
            self._add_to_dynamic_options(button)
            output = widgets.Output()
            self._add_to_dynamic_options(output)

            button.plan = plan
            button.on_click(self._apply)
            button.output = output

    def _show_options_after_categorization(self, plan: Plan, auto_apply: bool) -> None:
        # Reset the dynamic option container before the parent re-populates it.
        self.dynamic_options_after_categorization_output.children = ()
        self.display(self.dynamic_options_after_categorization_output)
        super()._show_options_after_categorization(plan, auto_apply)

    def _add_to_dynamic_options(self, *widgets: widgets.Widget) -> None:
        add_to_layout_widget(self.dynamic_options_after_categorization_output, *widgets)

    def _get_snapshot_change_category(
        self, snapshot: Snapshot, plan: Plan, auto_apply: bool
    ) -> None:
        import ipywidgets as widgets

        def radio_button_selected(change: t.Dict[str, t.Any]) -> None:
            # change["owner"].index is the selected radio position.
            plan.set_choice(snapshot, choices[change["owner"].index])
            self._show_options_after_categorization(plan, auto_apply)

        choice_mapping = self._snapshot_change_choices(snapshot, use_rich_formatting=False)
        choices = list(choice_mapping)
        # Default to the first (breaking) choice until the user selects one.
        plan.set_choice(snapshot, choices[0])

        radio = widgets.RadioButtons(
            options=choice_mapping.values(),
            layout={"width": "max-content"},
            disabled=False,
        )
        radio.observe(
            radio_button_selected,
            "value",
        )
        self.display(radio)

    def log_test_results(
        self, result: unittest.result.TestResult, output: str, target_dialect: str
    ) -> None:
        import ipywidgets as widgets

        divider_length = 70
        # Inline styles rendered via hyperscript into HTML widgets.
        shared_style = {
            "font-size": "11px",
            "font-weight": "bold",
            "font-family": "Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace",
        }
        if result.wasSuccessful():
            success_color = {"color": "#008000"}
            header = str(h("span", {"style": shared_style}, "-" * divider_length))
            message = str(
                h(
                    "span",
                    {"style": {**shared_style, **success_color}},
                    f"Successfully Ran {str(result.testsRun)} Tests Against {target_dialect}",
                )
            )
            footer = str(h("span", {"style": shared_style}, "=" * divider_length))
            self.display(widgets.HTML("<br>".join([header, message, footer])))
        else:
            fail_color = {"color": "#db3737"}
            fail_shared_style = {**shared_style, **fail_color}
            header = str(h("span", {"style": fail_shared_style}, "-" * divider_length))
            message = str(h("span", {"style": fail_shared_style}, "Test Failure Summary"))
            num_success = str(
                h(
                    "span",
                    {"style": fail_shared_style},
                    f"Num Successful Tests: {result.testsRun - len(result.failures) - len(result.errors)}",
                )
            )
            failure_tests = []
            for test, _ in result.failures + result.errors:
                if isinstance(test, ModelTest):
                    failure_tests.append(
                        str(
                            h(
                                "span",
                                {"style": fail_shared_style},
                                f"Failure Test: {test.model_name} {test.test_name}",
                            )
                        )
                    )
            failures = "<br>".join(failure_tests)
            footer = str(h("span", {"style": fail_shared_style}, "=" * divider_length))
            # Raw unittest output goes into a scrollable textarea under the summary.
            error_output = widgets.Textarea(output, layout={"height": "300px", "width": "100%"})
            test_info = widgets.HTML(
                "<br>".join([header, message, footer, num_success, failures, footer])
            )
            self.display(widgets.VBox(children=[test_info, error_output], layout={"width": "100%"}))
698 """ 699 700 def _print(self, value: t.Any, **kwargs: t.Any) -> None: 701 with self.console.capture() as capture: 702 self.console.print(value, **kwargs) 703 output = capture.get() 704 print(output) 705 706 def _prompt(self, message: str, **kwargs: t.Any) -> t.Any: 707 self._print(message) 708 return super()._prompt("", **kwargs) 709 710 def _confirm(self, message: str, **kwargs: t.Any) -> bool: 711 message = f"{message} \[y/n]" 712 self._print(message) 713 return super()._confirm("", **kwargs) 714 715 716def get_console(**kwargs: t.Any) -> TerminalConsole | DatabricksMagicConsole | NotebookMagicConsole: 717 """ 718 Returns the console that is appropriate for the current runtime environment. 719 720 Note: Google Colab environment is untested and currently assumes is compatible with the base 721 NotebookMagicConsole. 722 """ 723 from sqlmesh import RuntimeEnv, runtime_env 724 725 runtime_env_mapping = { 726 RuntimeEnv.DATABRICKS: DatabricksMagicConsole, 727 RuntimeEnv.JUPYTER: NotebookMagicConsole, 728 RuntimeEnv.TERMINAL: TerminalConsole, 729 RuntimeEnv.GOOGLE_COLAB: NotebookMagicConsole, 730 } 731 return runtime_env_mapping[runtime_env](**kwargs)
39class Console(abc.ABC): 40 """Abstract base class for defining classes used for displaying information to the user and also interact 41 with them when their input is needed""" 42 43 @abc.abstractmethod 44 def start_snapshot_progress(self, snapshot_name: str, total_batches: int) -> None: 45 """Indicates that a new load progress has begun.""" 46 47 @abc.abstractmethod 48 def update_snapshot_progress(self, snapshot_name: str, num_batches: int) -> None: 49 """Update snapshot progress.""" 50 51 @abc.abstractmethod 52 def stop_snapshot_progress(self, success: bool = True) -> None: 53 """Stop the load progress""" 54 55 @abc.abstractmethod 56 def start_promotion_progress(self, environment: str, total_tasks: int) -> None: 57 """Indicates that a new promotion progress has begun.""" 58 59 @abc.abstractmethod 60 def update_promotion_progress(self, num_tasks: int) -> None: 61 """Update promotion progress.""" 62 63 @abc.abstractmethod 64 def stop_promotion_progress(self, success: bool = True) -> None: 65 """Stop the promotion progress""" 66 67 @abc.abstractmethod 68 def show_model_difference_summary( 69 self, context_diff: ContextDiff, detailed: bool = False 70 ) -> None: 71 """Displays a summary of differences for the given models""" 72 73 @abc.abstractmethod 74 def plan(self, plan: Plan, auto_apply: bool) -> None: 75 """The main plan flow. 76 77 The console should present the user with choices on how to backfill and version the snapshots 78 of a plan. 79 80 Args: 81 plan: The plan to make choices for. 82 auto_apply: Whether to automatically apply the plan after all choices have been made. 83 """ 84 85 @abc.abstractmethod 86 def log_test_results( 87 self, result: unittest.result.TestResult, output: str, target_dialect: str 88 ) -> None: 89 """Display the test result and output 90 91 Args: 92 result: The unittest test result that contains metrics like num success, fails, ect. 
93 output: The generated output from the unittest 94 target_dialect: The dialect that tests were run against. Assumes all tests run against the same dialect. 95 """ 96 97 @abc.abstractmethod 98 def show_sql(self, sql: str) -> None: 99 """Display to the user SQL""" 100 101 @abc.abstractmethod 102 def log_status_update(self, message: str) -> None: 103 """Display general status update to the user""" 104 105 @abc.abstractmethod 106 def log_error(self, message: str) -> None: 107 """Display error info to the user""" 108 109 @abc.abstractmethod 110 def log_success(self, message: str) -> None: 111 """Display a general successful message to the user""" 112 113 @abc.abstractmethod 114 def loading_start(self, message: t.Optional[str] = None) -> uuid.UUID: 115 """Starts loading and returns a unique ID that can be used to stop the loading. Optionally can display a message""" 116 117 @abc.abstractmethod 118 def loading_stop(self, id: uuid.UUID) -> None: 119 """Stop loading for the given id"""
Abstract base class for defining classes used for displaying information to the user and also interacting with them when their input is needed
43 @abc.abstractmethod 44 def start_snapshot_progress(self, snapshot_name: str, total_batches: int) -> None: 45 """Indicates that a new load progress has begun."""
Indicates that a new load progress has begun.
47 @abc.abstractmethod 48 def update_snapshot_progress(self, snapshot_name: str, num_batches: int) -> None: 49 """Update snapshot progress."""
Update snapshot progress.
51 @abc.abstractmethod 52 def stop_snapshot_progress(self, success: bool = True) -> None: 53 """Stop the load progress"""
Stop the load progress
55 @abc.abstractmethod 56 def start_promotion_progress(self, environment: str, total_tasks: int) -> None: 57 """Indicates that a new promotion progress has begun."""
Indicates that a new promotion progress has begun.
59 @abc.abstractmethod 60 def update_promotion_progress(self, num_tasks: int) -> None: 61 """Update promotion progress."""
Update promotion progress.
63 @abc.abstractmethod 64 def stop_promotion_progress(self, success: bool = True) -> None: 65 """Stop the promotion progress"""
Stop the promotion progress
67 @abc.abstractmethod 68 def show_model_difference_summary( 69 self, context_diff: ContextDiff, detailed: bool = False 70 ) -> None: 71 """Displays a summary of differences for the given models"""
Displays a summary of differences for the given models
73 @abc.abstractmethod 74 def plan(self, plan: Plan, auto_apply: bool) -> None: 75 """The main plan flow. 76 77 The console should present the user with choices on how to backfill and version the snapshots 78 of a plan. 79 80 Args: 81 plan: The plan to make choices for. 82 auto_apply: Whether to automatically apply the plan after all choices have been made. 83 """
The main plan flow.
The console should present the user with choices on how to backfill and version the snapshots of a plan.
Arguments:
- plan: The plan to make choices for.
- auto_apply: Whether to automatically apply the plan after all choices have been made.
85 @abc.abstractmethod 86 def log_test_results( 87 self, result: unittest.result.TestResult, output: str, target_dialect: str 88 ) -> None: 89 """Display the test result and output 90 91 Args: 92 result: The unittest test result that contains metrics like num success, fails, ect. 93 output: The generated output from the unittest 94 target_dialect: The dialect that tests were run against. Assumes all tests run against the same dialect. 95 """
Display the test result and output
Arguments:
- result: The unittest test result that contains metrics like num success, fails, etc.
- output: The generated output from the unittest
- target_dialect: The dialect that tests were run against. Assumes all tests run against the same dialect.
101 @abc.abstractmethod 102 def log_status_update(self, message: str) -> None: 103 """Display general status update to the user"""
Display general status update to the user
105 @abc.abstractmethod 106 def log_error(self, message: str) -> None: 107 """Display error info to the user"""
Display error info to the user
109 @abc.abstractmethod 110 def log_success(self, message: str) -> None: 111 """Display a general successful message to the user"""
Display a general successful message to the user
113 @abc.abstractmethod 114 def loading_start(self, message: t.Optional[str] = None) -> uuid.UUID: 115 """Starts loading and returns a unique ID that can be used to stop the loading. Optionally can display a message"""
Starts loading and returns a unique ID that can be used to stop the loading. Optionally can display a message
class TerminalConsole(Console):
    """A rich-based implementation of the console."""

    def __init__(self, console: t.Optional[RichConsole] = None, **kwargs: t.Any) -> None:
        self.console: RichConsole = console or srich.console
        self.evaluation_progress: t.Optional[Progress] = None
        # Maps a snapshot name to its (progress task id, total batch count).
        self.evaluation_tasks: t.Dict[str, t.Tuple[TaskID, int]] = {}
        self.promotion_progress: t.Optional[Progress] = None
        self.promotion_task: t.Optional[TaskID] = None
        # Active loading spinners keyed by the id returned from `loading_start`.
        self.loading_status: t.Dict[uuid.UUID, Status] = {}

    def _print(self, value: t.Any, **kwargs: t.Any) -> None:
        # Bug fix: forward keyword arguments (e.g. `style`) to rich. Previously
        # they were silently dropped, so `log_test_results`'s `style="green"`
        # had no effect.
        self.console.print(value, **kwargs)

    def _prompt(self, message: str, **kwargs: t.Any) -> t.Any:
        return Prompt.ask(message, console=self.console, **kwargs)

    def _confirm(self, message: str, **kwargs: t.Any) -> bool:
        return Confirm.ask(message, console=self.console, **kwargs)

    def start_snapshot_progress(self, snapshot_name: str, total_batches: int) -> None:
        """Indicates that a new load progress has begun."""
        if not self.evaluation_progress:
            self.evaluation_progress = Progress(
                TextColumn("[bold blue]{task.fields[snapshot_name]}", justify="right"),
                BarColumn(bar_width=40),
                "[progress.percentage]{task.percentage:>3.1f}%",
                "•",
                srich.SchedulerBatchColumn(),
                "•",
                TimeElapsedColumn(),
                console=self.console,
            )
            self.evaluation_progress.start()
            self.evaluation_tasks = {}
        self.evaluation_tasks[snapshot_name] = (
            self.evaluation_progress.add_task(
                f"Running {snapshot_name}...",
                snapshot_name=snapshot_name,
                total=total_batches,
            ),
            total_batches,
        )

    def update_snapshot_progress(self, snapshot_name: str, num_batches: int) -> None:
        """Update snapshot progress."""
        if self.evaluation_progress and self.evaluation_tasks:
            task_id = self.evaluation_tasks[snapshot_name][0]
            self.evaluation_progress.update(task_id, refresh=True, advance=num_batches)

    def stop_snapshot_progress(self, success: bool = True) -> None:
        """Stop the load progress."""
        self.evaluation_tasks = {}
        if self.evaluation_progress:
            self.evaluation_progress.stop()
            self.evaluation_progress = None
            if success:
                self.log_success("All model batches have been executed successfully")

    def start_promotion_progress(self, environment: str, total_tasks: int) -> None:
        """Indicates that a new promotion progress has begun."""
        if self.promotion_progress is None:
            self.promotion_progress = Progress(
                TextColumn(f"[bold blue]Virtually Updating '{environment}'", justify="right"),
                BarColumn(bar_width=40),
                "[progress.percentage]{task.percentage:>3.1f}%",
                "•",
                TimeElapsedColumn(),
                console=self.console,
            )
            self.promotion_progress.start()
            self.promotion_task = self.promotion_progress.add_task(
                f"Virtually Updating {environment}...",
                total=total_tasks,
            )

    def update_promotion_progress(self, num_tasks: int) -> None:
        """Update promotion progress."""
        if self.promotion_progress is not None and self.promotion_task is not None:
            self.promotion_progress.update(self.promotion_task, refresh=True, advance=num_tasks)

    def stop_promotion_progress(self, success: bool = True) -> None:
        """Stop the promotion progress."""
        self.promotion_task = None
        if self.promotion_progress is not None:
            self.promotion_progress.stop()
            self.promotion_progress = None
            if success:
                self.log_success("The target environment has been updated successfully")

    def show_model_difference_summary(
        self, context_diff: ContextDiff, detailed: bool = False
    ) -> None:
        """Shows a summary of the differences.

        Args:
            context_diff: The context diff to use to print the summary
            detailed: Show the actual SQL differences if True.
        """
        if context_diff.is_new_environment:
            self._print(
                Tree(
                    f"[bold]New environment `{context_diff.environment}` will be created from `{context_diff.create_from}`"
                )
            )
            if not context_diff.has_snapshot_changes:
                return

        if not context_diff.has_changes:
            self._print(Tree(f"[bold]No differences when compared to `{context_diff.environment}`"))
            return

        tree = Tree(f"[bold]Summary of differences against `{context_diff.environment}`:")

        if context_diff.added:
            added_tree = Tree("[bold][added]Added Models:")
            for model in context_diff.added:
                added_tree.add(f"[added]{model}")
            tree.add(added_tree)

        if context_diff.removed:
            removed_tree = Tree("[bold][removed]Removed Models:")
            for model in context_diff.removed:
                removed_tree.add(f"[removed]{model}")
            tree.add(removed_tree)

        if context_diff.modified_snapshots:
            direct = Tree("[bold][direct]Directly Modified:")
            indirect = Tree("[bold][indirect]Indirectly Modified:")
            metadata = Tree("[bold][metadata]Metadata Updated:")
            for model in context_diff.modified_snapshots:
                if context_diff.directly_modified(model):
                    direct.add(
                        Syntax(f"{model}\n{context_diff.text_diff(model)}", "sql")
                        if detailed
                        else f"[direct]{model}"
                    )
                elif context_diff.indirectly_modified(model):
                    indirect.add(f"[indirect]{model}")
                elif context_diff.metadata_updated(model):
                    metadata.add(f"[metadata]{model}")
            if direct.children:
                tree.add(direct)
            if indirect.children:
                tree.add(indirect)
            if metadata.children:
                tree.add(metadata)
        self._print(tree)

    def plan(self, plan: Plan, auto_apply: bool) -> None:
        """The main plan flow.

        The console should present the user with choices on how to backfill and version the snapshots
        of a plan.

        Args:
            plan: The plan to make choices for.
            auto_apply: Whether to automatically apply the plan after all choices have been made.
        """
        self._prompt_categorize(plan, auto_apply)
        self._show_options_after_categorization(plan, auto_apply)

        if auto_apply:
            plan.apply()

    def _show_options_after_categorization(self, plan: Plan, auto_apply: bool) -> None:
        """Displays backfill or promotion prompts once all snapshots are categorized."""
        if plan.requires_backfill:
            self._show_missing_dates(plan)
            self._prompt_backfill(plan, auto_apply)
        elif plan.context_diff.has_changes and not auto_apply:
            self._prompt_promote(plan)

    def _prompt_categorize(self, plan: Plan, auto_apply: bool) -> None:
        """Get the user's change category for the directly modified models."""
        self.show_model_difference_summary(plan.context_diff)

        self._show_categorized_snapshots(plan)

        for snapshot in plan.uncategorized:
            self._print(Syntax(plan.context_diff.text_diff(snapshot.name), "sql"))
            tree = Tree(f"[bold][direct]Directly Modified: {snapshot.name}")
            indirect_tree = None

            for child in plan.indirectly_modified[snapshot.name]:
                if not indirect_tree:
                    indirect_tree = Tree("[indirect]Indirectly Modified Children:")
                    tree.add(indirect_tree)
                indirect_tree.add(f"[indirect]{child}")
            self._print(tree)
            self._get_snapshot_change_category(snapshot, plan, auto_apply)

    def _show_categorized_snapshots(self, plan: Plan) -> None:
        """Prints the diff and child tree for each already-categorized snapshot."""
        context_diff = plan.context_diff
        for snapshot in plan.categorized:
            if not context_diff.directly_modified(snapshot.name):
                continue

            category_str = SNAPSHOT_CHANGE_CATEGORY_STR[plan.snapshot_change_category(snapshot)]
            tree = Tree(f"[bold][direct]Directly Modified: {snapshot.name} ({category_str})")
            syntax_dff = Syntax(context_diff.text_diff(snapshot.name), "sql")
            indirect_tree = None
            for child in plan.indirectly_modified[snapshot.name]:
                if not indirect_tree:
                    indirect_tree = Tree("[indirect]Indirectly Modified Children:")
                    tree.add(indirect_tree)
                indirect_tree.add(f"[indirect]{child}")
            self._print(syntax_dff)
            self._print(tree)

    def _show_missing_dates(self, plan: Plan) -> None:
        """Displays the models with missing dates."""
        if not plan.missing_intervals:
            return
        backfill = Tree("[bold]Models needing backfill (missing dates):")
        for missing in plan.missing_intervals:
            backfill.add(f"{missing.snapshot_name}: {missing.format_missing_range()}")
        self._print(backfill)

    def _prompt_backfill(self, plan: Plan, auto_apply: bool) -> None:
        """Prompts for start/end dates and (optionally) confirmation before applying."""
        is_forward_only_dev = plan.is_dev and plan.forward_only
        backfill_or_preview = "preview" if is_forward_only_dev else "backfill"

        if plan.is_start_and_end_allowed:
            if not plan.override_start:
                blank_meaning = (
                    "to preview starting from yesterday"
                    if is_forward_only_dev
                    else "for the beginning of history"
                )
                start = self._prompt(
                    f"Enter the {backfill_or_preview} start date (eg. '1 year', '2020-01-01') or blank {blank_meaning}",
                )
                if start:
                    plan.start = start

            if not plan.override_end:
                end = self._prompt(
                    f"Enter the {backfill_or_preview} end date (eg. '1 month ago', '2020-01-01') or blank to {backfill_or_preview} up until now",
                )
                if end:
                    plan.end = end

        if not auto_apply and self._confirm(f"Apply - {backfill_or_preview.capitalize()} Tables"):
            plan.apply()

    def _prompt_promote(self, plan: Plan) -> None:
        """Asks for confirmation before a virtual (promotion-only) update."""
        if self._confirm(
            "Apply - Virtual Update",
        ):
            plan.apply()

    def log_test_results(
        self, result: unittest.result.TestResult, output: str, target_dialect: str
    ) -> None:
        """Display the test result and captured output.

        Args:
            result: The unittest test result that contains metrics like num success, fails, etc.
            output: The generated output from the unittest run.
            target_dialect: The dialect that tests were run against.
        """
        divider_length = 70
        if result.wasSuccessful():
            self._print("=" * divider_length)
            self._print(
                f"Successfully Ran {str(result.testsRun)} tests against {target_dialect}",
                style="green",
            )
            self._print("-" * divider_length)
        else:
            self._print("-" * divider_length)
            self._print("Test Failure Summary")
            self._print("=" * divider_length)
            self._print(
                f"Num Successful Tests: {result.testsRun - len(result.failures) - len(result.errors)}"
            )
            for test, _ in result.failures + result.errors:
                if isinstance(test, ModelTest):
                    self._print(f"Failure Test: {test.model_name} {test.test_name}")
            self._print("=" * divider_length)
            self._print(output)

    def show_sql(self, sql: str) -> None:
        """Prints SQL with syntax highlighting."""
        self._print(Syntax(sql, "sql"))

    def log_status_update(self, message: str) -> None:
        """Prints a plain status message."""
        self._print(message)

    def log_error(self, message: str) -> None:
        """Prints an error message in red."""
        self._print(f"[red]{message}[/red]")

    def log_success(self, message: str) -> None:
        """Prints a success message in green."""
        self._print(f"\n[green]{message}[/green]\n")

    def loading_start(self, message: t.Optional[str] = None) -> uuid.UUID:
        """Starts a loading spinner and returns a unique id that stops it via `loading_stop`."""
        # Renamed from `id` to avoid shadowing the builtin.
        load_id = uuid.uuid4()
        self.loading_status[load_id] = Status(message or "", console=self.console, spinner="line")
        self.loading_status[load_id].start()
        return load_id

    def loading_stop(self, id: uuid.UUID) -> None:
        """Stops and discards the loading spinner associated with `id`."""
        self.loading_status[id].stop()
        del self.loading_status[id]

    def _get_snapshot_change_category(
        self, snapshot: Snapshot, plan: Plan, auto_apply: bool
    ) -> None:
        """Prompts the user to pick a change category for a snapshot and records the choice."""
        choices = self._snapshot_change_choices(snapshot)
        response = self._prompt(
            "\n".join([f"[{i+1}] {choice}" for i, choice in enumerate(choices.values())]),
            show_choices=False,
            choices=[f"{i+1}" for i in range(len(choices))],
        )
        choice = list(choices)[int(response) - 1]
        plan.set_choice(snapshot, choice)

    def _snapshot_change_choices(
        self, snapshot: Snapshot, use_rich_formatting: bool = True
    ) -> t.Dict[SnapshotChangeCategory, str]:
        """Builds the category -> human-readable label mapping shown when categorizing."""
        direct = snapshot.name
        if use_rich_formatting:
            direct = f"[direct]{direct}[/direct]"
        indirect = "indirectly modified children"
        if use_rich_formatting:
            indirect = f"[indirect]{indirect}[/indirect]"
        if snapshot.is_view_kind:
            choices = {
                SnapshotChangeCategory.BREAKING: f"Update {direct} and backfill {indirect}",
                SnapshotChangeCategory.NON_BREAKING: f"Update {direct} but don't backfill {indirect}",
            }
        elif snapshot.is_embedded_kind:
            choices = {
                SnapshotChangeCategory.BREAKING: f"Backfill {indirect}",
                SnapshotChangeCategory.NON_BREAKING: f"Don't backfill {indirect}",
            }
        else:
            choices = {
                SnapshotChangeCategory.BREAKING: f"Backfill {direct} and {indirect}",
                SnapshotChangeCategory.NON_BREAKING: f"Backfill {direct} but not {indirect}",
            }
        labeled_choices = {
            k: f"[{SNAPSHOT_CHANGE_CATEGORY_STR[k]}] {v}" for k, v in choices.items()
        }
        return labeled_choices
A rich based implementation of the console
125 def __init__(self, console: t.Optional[RichConsole] = None, **kwargs: t.Any) -> None: 126 self.console: RichConsole = console or srich.console 127 self.evaluation_progress: t.Optional[Progress] = None 128 self.evaluation_tasks: t.Dict[str, t.Tuple[TaskID, int]] = {} 129 self.promotion_progress: t.Optional[Progress] = None 130 self.promotion_task: t.Optional[TaskID] = None 131 self.loading_status: t.Dict[uuid.UUID, Status] = {}
142 def start_snapshot_progress(self, snapshot_name: str, total_batches: int) -> None: 143 """Indicates that a new load progress has begun.""" 144 if not self.evaluation_progress: 145 self.evaluation_progress = Progress( 146 TextColumn("[bold blue]{task.fields[snapshot_name]}", justify="right"), 147 BarColumn(bar_width=40), 148 "[progress.percentage]{task.percentage:>3.1f}%", 149 "•", 150 srich.SchedulerBatchColumn(), 151 "•", 152 TimeElapsedColumn(), 153 console=self.console, 154 ) 155 self.evaluation_progress.start() 156 self.evaluation_tasks = {} 157 self.evaluation_tasks[snapshot_name] = ( 158 self.evaluation_progress.add_task( 159 f"Running {snapshot_name}...", 160 snapshot_name=snapshot_name, 161 total=total_batches, 162 ), 163 total_batches, 164 )
Indicates that a new load progress has begun.
166 def update_snapshot_progress(self, snapshot_name: str, num_batches: int) -> None: 167 """Update snapshot progress.""" 168 if self.evaluation_progress and self.evaluation_tasks: 169 task_id = self.evaluation_tasks[snapshot_name][0] 170 self.evaluation_progress.update(task_id, refresh=True, advance=num_batches)
Update snapshot progress.
172 def stop_snapshot_progress(self, success: bool = True) -> None: 173 """Stop the load progress""" 174 self.evaluation_tasks = {} 175 if self.evaluation_progress: 176 self.evaluation_progress.stop() 177 self.evaluation_progress = None 178 if success: 179 self.log_success("All model batches have been executed successfully")
Stop the load progress
181 def start_promotion_progress(self, environment: str, total_tasks: int) -> None: 182 """Indicates that a new promotion progress has begun.""" 183 if self.promotion_progress is None: 184 self.promotion_progress = Progress( 185 TextColumn(f"[bold blue]Virtually Updating '{environment}'", justify="right"), 186 BarColumn(bar_width=40), 187 "[progress.percentage]{task.percentage:>3.1f}%", 188 "•", 189 TimeElapsedColumn(), 190 console=self.console, 191 ) 192 self.promotion_progress.start() 193 self.promotion_task = self.promotion_progress.add_task( 194 f"Virtually Updating {environment}...", 195 total=total_tasks, 196 )
Indicates that a new promotion progress has begun.
198 def update_promotion_progress(self, num_tasks: int) -> None: 199 """Update promotion progress.""" 200 if self.promotion_progress is not None and self.promotion_task is not None: 201 self.promotion_progress.update(self.promotion_task, refresh=True, advance=num_tasks)
Update promotion progress.
203 def stop_promotion_progress(self, success: bool = True) -> None: 204 """Stop the promotion progress""" 205 self.promotion_task = None 206 if self.promotion_progress is not None: 207 self.promotion_progress.stop() 208 self.promotion_progress = None 209 if success: 210 self.log_success("The target environment has been updated successfully")
Stop the promotion progress
212 def show_model_difference_summary( 213 self, context_diff: ContextDiff, detailed: bool = False 214 ) -> None: 215 """Shows a summary of the differences. 216 217 Args: 218 context_diff: The context diff to use to print the summary 219 detailed: Show the actual SQL differences if True. 220 """ 221 if context_diff.is_new_environment: 222 self._print( 223 Tree( 224 f"[bold]New environment `{context_diff.environment}` will be created from `{context_diff.create_from}`" 225 ) 226 ) 227 if not context_diff.has_snapshot_changes: 228 return 229 230 if not context_diff.has_changes: 231 self._print(Tree(f"[bold]No differences when compared to `{context_diff.environment}`")) 232 return 233 234 tree = Tree(f"[bold]Summary of differences against `{context_diff.environment}`:") 235 236 if context_diff.added: 237 added_tree = Tree(f"[bold][added]Added Models:") 238 for model in context_diff.added: 239 added_tree.add(f"[added]{model}") 240 tree.add(added_tree) 241 242 if context_diff.removed: 243 removed_tree = Tree(f"[bold][removed]Removed Models:") 244 for model in context_diff.removed: 245 removed_tree.add(f"[removed]{model}") 246 tree.add(removed_tree) 247 248 if context_diff.modified_snapshots: 249 direct = Tree(f"[bold][direct]Directly Modified:") 250 indirect = Tree(f"[bold][indirect]Indirectly Modified:") 251 metadata = Tree(f"[bold][metadata]Metadata Updated:") 252 for model in context_diff.modified_snapshots: 253 if context_diff.directly_modified(model): 254 direct.add( 255 Syntax(f"{model}\n{context_diff.text_diff(model)}", "sql") 256 if detailed 257 else f"[direct]{model}" 258 ) 259 elif context_diff.indirectly_modified(model): 260 indirect.add(f"[indirect]{model}") 261 elif context_diff.metadata_updated(model): 262 metadata.add(f"[metadata]{model}") 263 if direct.children: 264 tree.add(direct) 265 if indirect.children: 266 tree.add(indirect) 267 if metadata.children: 268 tree.add(metadata) 269 self._print(tree)
Shows a summary of the differences.
Arguments:
- context_diff: The context diff to use to print the summary
- detailed: Show the actual SQL differences if True.
271 def plan(self, plan: Plan, auto_apply: bool) -> None: 272 """The main plan flow. 273 274 The console should present the user with choices on how to backfill and version the snapshots 275 of a plan. 276 277 Args: 278 plan: The plan to make choices for. 279 auto_apply: Whether to automatically apply the plan after all choices have been made. 280 """ 281 self._prompt_categorize(plan, auto_apply) 282 self._show_options_after_categorization(plan, auto_apply) 283 284 if auto_apply: 285 plan.apply()
The main plan flow.
The console should present the user with choices on how to backfill and version the snapshots of a plan.
Arguments:
- plan: The plan to make choices for.
- auto_apply: Whether to automatically apply the plan after all choices have been made.
373 def log_test_results( 374 self, result: unittest.result.TestResult, output: str, target_dialect: str 375 ) -> None: 376 divider_length = 70 377 if result.wasSuccessful(): 378 self._print("=" * divider_length) 379 self._print( 380 f"Successfully Ran {str(result.testsRun)} tests against {target_dialect}", 381 style="green", 382 ) 383 self._print("-" * divider_length) 384 else: 385 self._print("-" * divider_length) 386 self._print("Test Failure Summary") 387 self._print("=" * divider_length) 388 self._print( 389 f"Num Successful Tests: {result.testsRun - len(result.failures) - len(result.errors)}" 390 ) 391 for test, _ in result.failures + result.errors: 392 if isinstance(test, ModelTest): 393 self._print(f"Failure Test: {test.model_name} {test.test_name}") 394 self._print("=" * divider_length) 395 self._print(output)
Display the test result and output
Arguments:
- result: The unittest test result that contains metrics like num success, fails, etc.
- output: The generated output from the unittest
- target_dialect: The dialect that tests were run against. Assumes all tests run against the same dialect.
409 def loading_start(self, message: t.Optional[str] = None) -> uuid.UUID: 410 id = uuid.uuid4() 411 self.loading_status[id] = Status(message or "", console=self.console, spinner="line") 412 self.loading_status[id].start() 413 return id
Starts loading and returns a unique ID that can be used to stop the loading. Optionally can display a message
461def add_to_layout_widget(target_widget: LayoutWidget, *widgets: widgets.Widget) -> LayoutWidget: 462 """Helper function to add a widget to a layout widget 463 Args: 464 target_widget: The layout widget to add the other widget(s) to 465 *widgets: The widgets to add to the layout widget 466 467 Returns: 468 The layout widget with the children added 469 """ 470 target_widget.children += tuple(widgets) 471 return target_widget
Helper function to add a widget to a layout widget
Arguments:
- target_widget: The layout widget to add the other widget(s) to
- *widgets: The widgets to add to the layout widget
Returns:
The layout widget with the children added
class NotebookMagicConsole(TerminalConsole):
    """
    Console to be used when using the magic notebook interface (`%<command>`).
    Generally reuses the Terminal console when possible by either directly outputting what it provides
    or capturing it and converting it into a widget.
    """

    def __init__(
        self,
        display: t.Optional[t.Callable] = None,
        console: t.Optional[RichConsole] = None,
        **kwargs: t.Any,
    ) -> None:
        # Imported lazily so the module can be loaded outside of a notebook.
        import ipywidgets as widgets
        from IPython.display import display as ipython_display

        super().__init__(console, **kwargs)
        # `get_ipython` is assumed to be injected by the IPython runtime
        # (not imported here) — a user-defined `display` in the notebook
        # namespace takes precedence over IPython's default.
        self.display = display or get_ipython().user_ns.get("display", ipython_display)  # type: ignore
        self.missing_dates_output = widgets.Output()
        self.dynamic_options_after_categorization_output = widgets.VBox()

    def _show_missing_dates(self, plan: Plan) -> None:
        # Re-render missing dates into a dedicated output widget so the
        # display refreshes in place instead of appending.
        self._add_to_dynamic_options(self.missing_dates_output)
        self.missing_dates_output.outputs = ()
        with self.missing_dates_output:
            super()._show_missing_dates(plan)

    def _apply(self, button: widgets.Button) -> None:
        # Click handler: `plan` and `output` are attached to the button by the
        # prompt methods below; disable to prevent double-apply.
        button.disabled = True
        with button.output:
            button.plan.apply()

    def _prompt_promote(self, plan: Plan) -> None:
        import ipywidgets as widgets

        button = widgets.Button(
            description="Apply - Virtual Update",
            disabled=False,
            button_style="success",
            # Auto will make the button really large.
            # Likely changing this soon anyways to be just `Apply` with description above
            layout={"width": "10rem"},
        )
        self._add_to_dynamic_options(button)
        output = widgets.Output()
        self._add_to_dynamic_options(output)

        # Stash the plan and output on the button so `_apply` can reach them.
        button.plan = plan
        button.on_click(self._apply)
        button.output = output

    def _prompt_backfill(self, plan: Plan, auto_apply: bool) -> None:
        # Builds date pickers (when allowed) plus an Apply button instead of
        # the terminal's text prompts.
        import ipywidgets as widgets

        prompt = widgets.VBox()

        backfill_or_preview = "Preview" if plan.is_dev and plan.forward_only else "Backfill"

        def _date_picker(
            plan: Plan, value: t.Any, on_change: t.Callable, disabled: bool = False
        ) -> widgets.DatePicker:
            # Small factory: a date picker wired to `on_change` on value changes.
            picker = widgets.DatePicker(
                disabled=disabled,
                value=value,
                layout={"width": "auto"},
            )

            picker.observe(on_change, "value")
            return picker

        def _checkbox(description: str, value: bool, on_change: t.Callable) -> widgets.Checkbox:
            # NOTE(review): currently unused within this method — kept as-is.
            checkbox = widgets.Checkbox(
                value=value,
                description=description,
                disabled=False,
                indent=False,
            )

            checkbox.observe(on_change, "value")
            return checkbox

        def start_change_callback(change: t.Dict[str, datetime.datetime]) -> None:
            # `change["new"]` is the newly selected value; re-render options
            # since changing the range can alter what needs backfilling.
            plan.start = change["new"]
            self._show_options_after_categorization(plan, auto_apply)

        def end_change_callback(change: t.Dict[str, datetime.datetime]) -> None:
            plan.end = change["new"]
            self._show_options_after_categorization(plan, auto_apply)

        if plan.is_start_and_end_allowed:
            add_to_layout_widget(
                prompt,
                widgets.HBox(
                    [
                        widgets.Label(
                            f"Start {backfill_or_preview} Date:", layout={"width": "8rem"}
                        ),
                        _date_picker(plan, to_date(plan.start), start_change_callback),
                    ]
                ),
            )

            add_to_layout_widget(
                prompt,
                widgets.HBox(
                    [
                        widgets.Label(f"End {backfill_or_preview} Date:", layout={"width": "8rem"}),
                        _date_picker(
                            plan,
                            to_date(plan.end),
                            end_change_callback,
                        ),
                    ]
                ),
            )

        self._add_to_dynamic_options(prompt)

        if not auto_apply:
            button = widgets.Button(
                description=f"Apply - {backfill_or_preview} Tables",
                disabled=False,
                button_style="success",
            )
            self._add_to_dynamic_options(button)
            output = widgets.Output()
            self._add_to_dynamic_options(output)

            button.plan = plan
            button.on_click(self._apply)
            button.output = output

    def _show_options_after_categorization(self, plan: Plan, auto_apply: bool) -> None:
        # Clear and re-display the dynamic options container before the parent
        # logic repopulates it.
        self.dynamic_options_after_categorization_output.children = ()
        self.display(self.dynamic_options_after_categorization_output)
        super()._show_options_after_categorization(plan, auto_apply)

    def _add_to_dynamic_options(self, *widgets: widgets.Widget) -> None:
        # Convenience wrapper over the module-level helper.
        add_to_layout_widget(self.dynamic_options_after_categorization_output, *widgets)

    def _get_snapshot_change_category(
        self, snapshot: Snapshot, plan: Plan, auto_apply: bool
    ) -> None:
        # Radio buttons replace the terminal's numbered prompt; the first
        # choice is pre-selected as the default.
        import ipywidgets as widgets

        def radio_button_selected(change: t.Dict[str, t.Any]) -> None:
            # `change["owner"].index` is the selected option's position.
            plan.set_choice(snapshot, choices[change["owner"].index])
            self._show_options_after_categorization(plan, auto_apply)

        choice_mapping = self._snapshot_change_choices(snapshot, use_rich_formatting=False)
        choices = list(choice_mapping)
        plan.set_choice(snapshot, choices[0])

        radio = widgets.RadioButtons(
            options=choice_mapping.values(),
            layout={"width": "max-content"},
            disabled=False,
        )
        radio.observe(
            radio_button_selected,
            "value",
        )
        self.display(radio)

    def log_test_results(
        self, result: unittest.result.TestResult, output: str, target_dialect: str
    ) -> None:
        """Display the test result and output as HTML widgets.

        Args:
            result: The unittest test result that contains metrics like num success, fails, etc.
            output: The generated output from the unittest run.
            target_dialect: The dialect that tests were run against.
        """
        import ipywidgets as widgets

        divider_length = 70
        shared_style = {
            "font-size": "11px",
            "font-weight": "bold",
            "font-family": "Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace",
        }
        if result.wasSuccessful():
            success_color = {"color": "#008000"}
            header = str(h("span", {"style": shared_style}, "-" * divider_length))
            message = str(
                h(
                    "span",
                    {"style": {**shared_style, **success_color}},
                    f"Successfully Ran {str(result.testsRun)} Tests Against {target_dialect}",
                )
            )
            footer = str(h("span", {"style": shared_style}, "=" * divider_length))
            self.display(widgets.HTML("<br>".join([header, message, footer])))
        else:
            fail_color = {"color": "#db3737"}
            fail_shared_style = {**shared_style, **fail_color}
            header = str(h("span", {"style": fail_shared_style}, "-" * divider_length))
            message = str(h("span", {"style": fail_shared_style}, "Test Failure Summary"))
            num_success = str(
                h(
                    "span",
                    {"style": fail_shared_style},
                    f"Num Successful Tests: {result.testsRun - len(result.failures) - len(result.errors)}",
                )
            )
            failure_tests = []
            for test, _ in result.failures + result.errors:
                if isinstance(test, ModelTest):
                    failure_tests.append(
                        str(
                            h(
                                "span",
                                {"style": fail_shared_style},
                                f"Failure Test: {test.model_name} {test.test_name}",
                            )
                        )
                    )
            failures = "<br>".join(failure_tests)
            footer = str(h("span", {"style": fail_shared_style}, "=" * divider_length))
            # Full test output goes into a scrollable text area below the summary.
            error_output = widgets.Textarea(output, layout={"height": "300px", "width": "100%"})
            test_info = widgets.HTML(
                "<br>".join([header, message, footer, num_success, failures, footer])
            )
            self.display(widgets.VBox(children=[test_info, error_output], layout={"width": "100%"}))
Console to be used when using the magic notebook interface (`%<command>`).
Generally reuses the Terminal console when possible by either directly outputting what it provides
or capturing it and converting it into a widget.
481 def __init__( 482 self, 483 display: t.Optional[t.Callable] = None, 484 console: t.Optional[RichConsole] = None, 485 **kwargs: t.Any, 486 ) -> None: 487 import ipywidgets as widgets 488 from IPython.display import display as ipython_display 489 490 super().__init__(console, **kwargs) 491 self.display = display or get_ipython().user_ns.get("display", ipython_display) # type: ignore 492 self.missing_dates_output = widgets.Output() 493 self.dynamic_options_after_categorization_output = widgets.VBox()
638 def log_test_results( 639 self, result: unittest.result.TestResult, output: str, target_dialect: str 640 ) -> None: 641 import ipywidgets as widgets 642 643 divider_length = 70 644 shared_style = { 645 "font-size": "11px", 646 "font-weight": "bold", 647 "font-family": "Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace", 648 } 649 if result.wasSuccessful(): 650 success_color = {"color": "#008000"} 651 header = str(h("span", {"style": shared_style}, "-" * divider_length)) 652 message = str( 653 h( 654 "span", 655 {"style": {**shared_style, **success_color}}, 656 f"Successfully Ran {str(result.testsRun)} Tests Against {target_dialect}", 657 ) 658 ) 659 footer = str(h("span", {"style": shared_style}, "=" * divider_length)) 660 self.display(widgets.HTML("<br>".join([header, message, footer]))) 661 else: 662 fail_color = {"color": "#db3737"} 663 fail_shared_style = {**shared_style, **fail_color} 664 header = str(h("span", {"style": fail_shared_style}, "-" * divider_length)) 665 message = str(h("span", {"style": fail_shared_style}, "Test Failure Summary")) 666 num_success = str( 667 h( 668 "span", 669 {"style": fail_shared_style}, 670 f"Num Successful Tests: {result.testsRun - len(result.failures) - len(result.errors)}", 671 ) 672 ) 673 failure_tests = [] 674 for test, _ in result.failures + result.errors: 675 if isinstance(test, ModelTest): 676 failure_tests.append( 677 str( 678 h( 679 "span", 680 {"style": fail_shared_style}, 681 f"Failure Test: {test.model_name} {test.test_name}", 682 ) 683 ) 684 ) 685 failures = "<br>".join(failure_tests) 686 footer = str(h("span", {"style": fail_shared_style}, "=" * divider_length)) 687 error_output = widgets.Textarea(output, layout={"height": "300px", "width": "100%"}) 688 test_info = widgets.HTML( 689 "<br>".join([header, message, footer, num_success, failures, footer]) 690 ) 691 self.display(widgets.VBox(children=[test_info, error_output], layout={"width": "100%"}))
Display the test result and output
Arguments:
- result: The unittest test result that contains metrics like num success, fails, etc.
- output: The generated output from the unittest
- target_dialect: The dialect that tests were run against. Assumes all tests run against the same dialect.
694class DatabricksMagicConsole(TerminalConsole): 695 """ 696 Note: Databricks Magic Console currently does not support progress bars while a plan is being applied. The 697 NotebookMagicConsole does support progress bars, but they will time out after 5 minutes of execution 698 and it makes it difficult to see the progress of the plan. 699 """ 700 701 def _print(self, value: t.Any, **kwargs: t.Any) -> None: 702 with self.console.capture() as capture: 703 self.console.print(value, **kwargs) 704 output = capture.get() 705 print(output) 706 707 def _prompt(self, message: str, **kwargs: t.Any) -> t.Any: 708 self._print(message) 709 return super()._prompt("", **kwargs) 710 711 def _confirm(self, message: str, **kwargs: t.Any) -> bool: 712 message = f"{message} \[y/n]" 713 self._print(message) 714 return super()._confirm("", **kwargs)
Note: Databricks Magic Console currently does not support progress bars while a plan is being applied. The NotebookMagicConsole does support progress bars, but they will time out after 5 minutes of execution and it makes it difficult to see the progress of the plan.
Inherited Members
- TerminalConsole
- TerminalConsole
- start_snapshot_progress
- update_snapshot_progress
- stop_snapshot_progress
- start_promotion_progress
- update_promotion_progress
- stop_promotion_progress
- show_model_difference_summary
- plan
- log_test_results
- show_sql
- log_status_update
- log_error
- log_success
- loading_start
- loading_stop
717def get_console(**kwargs: t.Any) -> TerminalConsole | DatabricksMagicConsole | NotebookMagicConsole: 718 """ 719 Returns the console that is appropriate for the current runtime environment. 720 721 Note: Google Colab environment is untested and currently assumes is compatible with the base 722 NotebookMagicConsole. 723 """ 724 from sqlmesh import RuntimeEnv, runtime_env 725 726 runtime_env_mapping = { 727 RuntimeEnv.DATABRICKS: DatabricksMagicConsole, 728 RuntimeEnv.JUPYTER: NotebookMagicConsole, 729 RuntimeEnv.TERMINAL: TerminalConsole, 730 RuntimeEnv.GOOGLE_COLAB: NotebookMagicConsole, 731 } 732 return runtime_env_mapping[runtime_env](**kwargs)
Returns the console that is appropriate for the current runtime environment.
Note: the Google Colab environment is untested and is currently assumed to be compatible with the base NotebookMagicConsole.