sqlmesh.core.engine_adapter.duckdb
1from __future__ import annotations 2 3import math 4import typing as t 5 6import pandas as pd 7from sqlglot import exp 8 9from sqlmesh.core.engine_adapter.base import EngineAdapter 10from sqlmesh.core.engine_adapter.shared import DataObject, DataObjectType 11 12if t.TYPE_CHECKING: 13 from sqlmesh.core._typing import TableName 14 15 16class DuckDBEngineAdapter(EngineAdapter): 17 DIALECT = "duckdb" 18 19 def _insert_append_pandas_df( 20 self, 21 table_name: TableName, 22 df: pd.DataFrame, 23 columns_to_types: t.Optional[t.Dict[str, exp.DataType]] = None, 24 ) -> None: 25 self.execute( 26 exp.Insert( 27 this=self._insert_into_expression(table_name, columns_to_types), 28 expression="SELECT * FROM df", 29 overwrite=False, 30 ) 31 ) 32 33 def _get_data_objects( 34 self, schema_name: str, catalog_name: t.Optional[str] = None 35 ) -> t.List[DataObject]: 36 """ 37 Returns all the data objects that exist in the given schema and optionally catalog. 38 """ 39 catalog_name = f"'{catalog_name}'" if catalog_name else "NULL" 40 query = f""" 41 SELECT 42 {catalog_name} as catalog, 43 table_name as name, 44 table_schema as schema, 45 CASE table_type 46 WHEN 'BASE TABLE' THEN 'table' 47 WHEN 'VIEW' THEN 'view' 48 WHEN 'LOCAL TEMPORARY' THEN 'table' 49 END as type 50 FROM information_schema.tables 51 WHERE table_schema = '{ schema_name }' 52 """ 53 df = self.fetchdf(query) 54 return [ 55 DataObject( 56 catalog=None if math.isnan(row.catalog) else row.catalog, # type: ignore 57 schema=row.schema, # type: ignore 58 name=row.name, # type: ignore 59 type=DataObjectType.from_str(row.type), # type: ignore 60 ) 61 for row in df.itertuples() 62 ]
class DuckDBEngineAdapter(EngineAdapter):
    """Engine adapter for DuckDB connections."""

    DIALECT = "duckdb"

    def _insert_append_pandas_df(
        self,
        table_name: TableName,
        df: pd.DataFrame,
        columns_to_types: t.Optional[t.Dict[str, exp.DataType]] = None,
    ) -> None:
        """Append the contents of `df` to `table_name`.

        Relies on DuckDB's pandas replacement scan: the table reference ``df``
        inside the SELECT resolves to the local DataFrame variable, so no
        explicit registration is required.

        Args:
            table_name: Target table to insert into.
            df: The pandas DataFrame whose rows are appended.
            columns_to_types: Optional explicit column-to-type mapping used to
                build the insert target expression.
        """
        self.execute(
            exp.Insert(
                this=self._insert_into_expression(table_name, columns_to_types),
                expression="SELECT * FROM df",
                overwrite=False,
            )
        )

    def _get_data_objects(
        self, schema_name: str, catalog_name: t.Optional[str] = None
    ) -> t.List[DataObject]:
        """Returns all the data objects that exist in the given schema and optionally catalog.

        Args:
            schema_name: The schema to inspect.
            catalog_name: Optional catalog label echoed back on each result row.

        Returns:
            A list of `DataObject`s describing the tables and views in the schema.
        """
        # Keep the parameter intact; build its SQL-literal form separately.
        catalog_sql = f"'{catalog_name}'" if catalog_name else "NULL"
        # NOTE(review): schema_name is interpolated directly into the query.
        # Callers are internal, but a parameterized query would be safer.
        query = f"""
            SELECT
                {catalog_sql} AS catalog,
                table_name AS name,
                table_schema AS schema,
                CASE table_type
                    WHEN 'BASE TABLE' THEN 'table'
                    WHEN 'VIEW' THEN 'view'
                    WHEN 'LOCAL TEMPORARY' THEN 'table'
                END AS type
            FROM information_schema.tables
            WHERE table_schema = '{schema_name}'
        """
        df = self.fetchdf(query)
        return [
            DataObject(
                # pd.isna handles both the NULL-catalog case (NaN float) and a
                # provided catalog (string); math.isnan raised TypeError on str.
                catalog=None if pd.isna(row.catalog) else row.catalog,  # type: ignore
                schema=row.schema,  # type: ignore
                name=row.name,  # type: ignore
                type=DataObjectType.from_str(row.type),  # type: ignore
            )
            for row in df.itertuples()
        ]
Base class wrapping a Database API-compliant connection.
The EngineAdapter is an easily subclassable interface that interacts with the underlying engine and data store.
Arguments:
- connection_factory: a callable which produces a new Database API-compliant connection on every call.
- dialect: The dialect with which this adapter is associated.
- multithreaded: Indicates whether this adapter will be used by more than one thread.
Inherited Members
- sqlmesh.core.engine_adapter.base.EngineAdapter
- EngineAdapter
- recycle
- close
- replace_query
- create_index
- create_table
- create_state_table
- create_table_like
- drop_table
- alter_table
- create_view
- create_schema
- drop_schema
- drop_view
- columns
- table_exists
- delete_from
- insert_append
- insert_overwrite_by_time_partition
- update_table
- merge
- rename_table
- fetchone
- fetchall
- fetchdf
- fetch_pyspark_df
- transaction
- supports_transactions
- execute