This is an automated email from the ASF dual-hosted git repository.
villebro pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-superset.git
The following commit(s) were added to refs/heads/master by this push:
new fb51632 Add docstrings and typing to db_engine_specs and sql_parse
(#8058)
fb51632 is described below
commit fb51632e18e03d3401d25035b3a14d7a40370295
Author: Ville Brofeldt <[email protected]>
AuthorDate: Thu Aug 22 06:29:32 2019 +0300
Add docstrings and typing to db_engine_specs and sql_parse (#8058)
* Add typing to db_engine_specs
* Add more type annotations and docstrings
* Add docstrings and typing to sql_parse and db_engine_specs
* Refine select_star
* Fix execute and add more docstrings
* Revert kwargs change from execute
* Remove redundant or
* Align view and table getter schema types
* Fix return type of latest_partition
* Remove some typing from presto
* Improve docstring for __extract_from_token
---
superset/db_engine_specs/athena.py | 13 +-
superset/db_engine_specs/base.py | 358 ++++++++++++++++++++++++++-------
superset/db_engine_specs/bigquery.py | 28 ++-
superset/db_engine_specs/clickhouse.py | 4 +-
superset/db_engine_specs/db2.py | 6 +-
superset/db_engine_specs/drill.py | 7 +-
superset/db_engine_specs/hive.py | 31 +--
superset/db_engine_specs/impala.py | 11 +-
superset/db_engine_specs/kylin.py | 4 +-
superset/db_engine_specs/mssql.py | 17 +-
superset/db_engine_specs/mysql.py | 17 +-
superset/db_engine_specs/oracle.py | 4 +-
superset/db_engine_specs/pinot.py | 8 +-
superset/db_engine_specs/postgres.py | 15 +-
superset/db_engine_specs/presto.py | 53 +++--
superset/db_engine_specs/redshift.py | 7 +-
superset/db_engine_specs/snowflake.py | 4 +-
superset/db_engine_specs/sqlite.py | 9 +-
superset/db_engine_specs/teradata.py | 2 +-
superset/sql_parse.py | 88 ++++----
tests/db_engine_specs_test.py | 4 +-
21 files changed, 496 insertions(+), 194 deletions(-)
diff --git a/superset/db_engine_specs/athena.py
b/superset/db_engine_specs/athena.py
index d1c67af..861516d 100644
--- a/superset/db_engine_specs/athena.py
+++ b/superset/db_engine_specs/athena.py
@@ -15,6 +15,8 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
+from datetime import datetime
+
from superset.db_engine_specs.base import BaseEngineSpec
@@ -38,7 +40,7 @@ class AthenaEngineSpec(BaseEngineSpec):
}
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
tt = target_type.upper()
if tt == "DATE":
return "from_iso8601_date('{}')".format(dttm.isoformat()[:10])
@@ -47,14 +49,15 @@ class AthenaEngineSpec(BaseEngineSpec):
return "CAST ('{}' AS TIMESTAMP)".format(dttm.strftime("%Y-%m-%d
%H:%M:%S"))
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
return "from_unixtime({col})"
@staticmethod
- def mutate_label(label):
+ def _mutate_label(label: str) -> str:
"""
Athena only supports lowercase column names and aliases.
- :param str label: Original label which might include uppercase letters
- :return: String that is supported by the database
+
+ :param label: Expected expression label
+ :return: Conditionally mutated label
"""
return label.lower()
diff --git a/superset/db_engine_specs/base.py b/superset/db_engine_specs/base.py
index d4a550c..1a5bb8c 100644
--- a/superset/db_engine_specs/base.py
+++ b/superset/db_engine_specs/base.py
@@ -15,29 +15,37 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
-from collections import namedtuple
+from datetime import datetime
import hashlib
import os
import re
-from typing import Dict, List, Optional, Tuple
+from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
from flask import g
from flask_babel import lazy_gettext as _
import pandas as pd
from sqlalchemy import column, DateTime, select
from sqlalchemy.engine import create_engine
+from sqlalchemy.engine.base import Engine
+from sqlalchemy.engine.interfaces import Compiled, Dialect
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import quoted_name, text
-from sqlalchemy.sql.expression import ColumnClause
-from sqlalchemy.sql.expression import TextAsFrom
+from sqlalchemy.sql.expression import ColumnClause, ColumnElement, Select,
TextAsFrom
+from sqlalchemy.types import TypeEngine
import sqlparse
from werkzeug.utils import secure_filename
from superset import app, db, sql_parse
from superset.utils import core as utils
-Grain = namedtuple("Grain", "name label function duration")
+
+class TimeGrain(NamedTuple):
+ name: str # TODO: redundant field, remove
+ label: str
+ function: str
+ duration: Optional[str]
+
config = app.config
@@ -45,7 +53,7 @@ config = app.config
QueryStatus = utils.QueryStatus
config = app.config
-builtin_time_grains = {
+builtin_time_grains: Dict[Optional[str], str] = {
None: "Time Column",
"PT1S": "second",
"PT1M": "minute",
@@ -87,7 +95,9 @@ class TimestampExpression(ColumnClause):
@compiles(TimestampExpression)
-def compile_timegrain_expression(element: TimestampExpression, compiler, **kw):
+def compile_timegrain_expression(
+ element: TimestampExpression, compiler: Compiled, **kw
+) -> str:
return element.name.replace("{col}", compiler.process(element.col, **kw))
@@ -99,17 +109,30 @@ class LimitMethod(object):
FORCE_LIMIT = "force_limit"
-def create_time_grains_tuple(time_grains, time_grain_functions, blacklist):
+def _create_time_grains_tuple(
+ time_grains: Dict[Optional[str], str],
+ time_grain_functions: Dict[Optional[str], str],
+ blacklist: List[str],
+) -> Tuple[TimeGrain, ...]:
+ """
+ function for creating a tuple of time grains based on time grains provided
by
+ the engine and any potential additional or blacklisted grains in the
config file.
+
+ :param time_grains: all time grains supported by the engine + config files
+ :param time_grain_functions: mapping between time grain id and sql
expression
+ :param blacklist: list of time grain ids to be excluded
+ :return: final collection of time grains
+ """
ret_list = []
blacklist = blacklist if blacklist else []
for duration, func in time_grain_functions.items():
- if duration not in blacklist:
- name = time_grains.get(duration)
- ret_list.append(Grain(name, _(name), func, duration))
+ if duration in time_grains and duration not in blacklist:
+ name = time_grains[duration]
+ ret_list.append(TimeGrain(name, _(name), func, duration))
return tuple(ret_list)
-class BaseEngineSpec(object):
+class BaseEngineSpec:
"""Abstract class for database engine specific configurations"""
engine = "base" # str as defined in sqlalchemy.engine.engine
@@ -155,23 +178,43 @@ class BaseEngineSpec(object):
return TimestampExpression(time_expr, col, type_=DateTime)
@classmethod
- def get_time_grains(cls):
- blacklist = config.get("TIME_GRAIN_BLACKLIST", [])
- grains = builtin_time_grains.copy()
- grains.update(config.get("TIME_GRAIN_ADDONS", {}))
+ def get_time_grains(cls) -> Tuple[TimeGrain, ...]:
+ """
+ Generate a tuple of time grains based on time grains provided by the
engine
+ and any potential additional or blacklisted grains in the config file.
+
+ :return: All time grains supported by the engine
+ """
+ blacklist: List[str] = config.get("TIME_GRAIN_BLACKLIST", [])
+ supported_grains = builtin_time_grains.copy()
+ supported_grains.update(config.get("TIME_GRAIN_ADDONS", {}))
grain_functions = cls.time_grain_functions.copy()
grain_addon_functions = config.get("TIME_GRAIN_ADDON_FUNCTIONS", {})
grain_functions.update(grain_addon_functions.get(cls.engine, {}))
- return create_time_grains_tuple(grains, grain_functions, blacklist)
+ return _create_time_grains_tuple(supported_grains, grain_functions,
blacklist)
@classmethod
- def make_select_compatible(cls, groupby_exprs, select_exprs):
- # Some databases will just return the group-by field into the select,
but don't
- # allow the group-by field to be put into the select list.
+ def make_select_compatible(
+ cls, groupby_exprs: Dict[str, ColumnElement], select_exprs:
List[ColumnElement]
+ ) -> List[ColumnElement]:
+ """
+ Some databases will just return the group-by field into the select,
but don't
+ allow the group-by field to be put into the select list.
+
+ :param groupby_exprs: mapping between column name and column object
+ :param select_exprs: all columns in the select clause
+ :return: columns to be included in the final select clause
+ """
return select_exprs
@classmethod
- def fetch_data(cls, cursor, limit):
+ def fetch_data(cls, cursor, limit: int) -> List[Tuple]:
+ """
+
+ :param cursor: Cursor instance
+ :param limit: Maximum number of rows to be returned by the cursor
+ :return: Result of query
+ """
if cls.arraysize:
cursor.arraysize = cls.arraysize
if cls.limit_method == LimitMethod.FETCH_MANY:
@@ -182,6 +225,15 @@ class BaseEngineSpec(object):
def expand_data(
cls, columns: List[dict], data: List[dict]
) -> Tuple[List[dict], List[dict], List[dict]]:
+ """
+ Some engines support expanding nested fields. See implementation in
Presto
+ spec for details.
+
+ :param columns: columns selected in the query
+ :param data: original data set
+    :return: list of all columns (selected columns and their nested fields),
+        expanded data set, list of nested fields
+ """
return columns, data, []
@classmethod
@@ -191,29 +243,68 @@ class BaseEngineSpec(object):
For instance special column like `__time` for Druid can be
set to is_dttm=True. Note that this only gets called when new
columns are detected/created"""
+ # TODO: Fix circular import caused by importing TableColumn
pass
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
+ """
+ SQL expression that converts epoch (seconds) to datetime that can be
used in a
+ query. The reference column should be denoted as `{col}` in the return
+ expression, e.g. "FROM_UNIXTIME({col})"
+
+ :return: SQL Expression
+ """
raise NotImplementedError()
@classmethod
- def epoch_ms_to_dttm(cls):
+ def epoch_ms_to_dttm(cls) -> str:
+ """
+ SQL expression that converts epoch (milliseconds) to datetime that can
be used
+ in a query.
+
+ :return: SQL Expression
+ """
return cls.epoch_to_dttm().replace("{col}", "({col}/1000)")
@classmethod
- def get_datatype(cls, type_code):
+ def get_datatype(cls, type_code: Any) -> Optional[str]:
+ """
+ Change column type code from cursor description to string
representation.
+
+ :param type_code: Type code from cursor description
+ :return: String representation of type code
+ """
if isinstance(type_code, str) and len(type_code):
return type_code.upper()
+ return None
@classmethod
- def extra_table_metadata(cls, database, table_name, schema_name):
- """Returns engine-specific table metadata"""
+ def extra_table_metadata(
+ cls, database, table_name: str, schema_name: str
+ ) -> Dict[str, Any]:
+ """
+ Returns engine-specific table metadata
+
+ :param database: Database instance
+ :param table_name: Table name
+ :param schema_name: Schema name
+ :return: Engine-specific table metadata
+ """
+ # TODO: Fix circular import caused by importing Database
return {}
@classmethod
- def apply_limit_to_sql(cls, sql, limit, database):
- """Alters the SQL statement to apply a LIMIT clause"""
+ def apply_limit_to_sql(cls, sql: str, limit: int, database) -> str:
+ """
+ Alters the SQL statement to apply a LIMIT clause
+
+ :param sql: SQL query
+ :param limit: Maximum number of rows to be returned by the query
+ :param database: Database instance
+ :return: SQL query with limit clause
+ """
+ # TODO: Fix circular import caused by importing Database
if cls.limit_method == LimitMethod.WRAP_SQL:
sql = sql.strip("\t\n ;")
qry = (
@@ -228,12 +319,25 @@ class BaseEngineSpec(object):
return sql
@classmethod
- def get_limit_from_sql(cls, sql):
+ def get_limit_from_sql(cls, sql: str) -> int:
+ """
+ Extract limit from SQL query
+
+ :param sql: SQL query
+ :return: Value of limit clause in query
+ """
parsed_query = sql_parse.ParsedQuery(sql)
return parsed_query.limit
@classmethod
- def get_query_with_new_limit(cls, sql, limit):
+ def get_query_with_new_limit(cls, sql: str, limit: int) -> str:
+ """
+ Create a query based on original query but with new limit clause
+
+ :param sql: SQL query
+ :param limit: New limit to insert/replace into query
+ :return: Query with new limit
+ """
parsed_query = sql_parse.ParsedQuery(sql)
return parsed_query.get_query_with_new_limit(limit)
@@ -315,7 +419,14 @@ class BaseEngineSpec(object):
db.session.commit()
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
+ """
+ Convert DateTime object to sql expression
+
+ :param target_type: Target type of expression
+ :param dttm: DateTime object
+ :return: SQL expression
+ """
return "'{}'".format(dttm.strftime("%Y-%m-%d %H:%M:%S"))
@classmethod
@@ -328,6 +439,7 @@ class BaseEngineSpec(object):
:param datasource_type: Datasource_type can be 'table' or 'view'
:return: List of all datasources in database or schema
"""
+ # TODO: Fix circular import caused by importing Database
schemas = db.get_all_schema_names(
cache=db.schema_cache_enabled,
cache_timeout=db.schema_cache_timeout,
@@ -360,15 +472,16 @@ class BaseEngineSpec(object):
The flow works without this method doing anything, but it allows
for handling the cursor and updating progress information in the
query object"""
+ # TODO: Fix circular import error caused by importing sql_lab.Query
pass
@classmethod
- def extract_error_message(cls, e):
+ def extract_error_message(cls, e: Exception) -> str:
"""Extract error message for queries"""
return utils.error_msg_from_exception(e)
@classmethod
- def adjust_database_uri(cls, uri, selected_schema):
+ def adjust_database_uri(cls, uri, selected_schema: str):
"""Based on a URI and selected schema, return a new URI
The URI here represents the URI as entered when saving the database,
@@ -386,37 +499,90 @@ class BaseEngineSpec(object):
Some database drivers like presto accept '{catalog}/{schema}' in
the database component of the URL, that can be handled here.
"""
+ # TODO: All overrides mutate input uri; should be renamed or refactored
return uri
@classmethod
def patch(cls):
+ """
+ TODO: Improve docstring and refactor implementation in Hive
+ """
pass
@classmethod
- def get_schema_names(cls, inspector):
+ def get_schema_names(cls, inspector: Inspector) -> List[str]:
+ """
+ Get all schemas from database
+
+ :param inspector: SqlAlchemy inspector
+ :return: All schemas in the database
+ """
return sorted(inspector.get_schema_names())
@classmethod
- def get_table_names(cls, inspector, schema):
+ def get_table_names(cls, inspector: Inspector, schema: Optional[str]) ->
List[str]:
+ """
+ Get all tables from schema
+
+ :param inspector: SqlAlchemy inspector
+ :param schema: Schema to inspect. If omitted, uses default schema for
database
+ :return: All tables in schema
+ """
tables = inspector.get_table_names(schema)
if schema and cls.try_remove_schema_from_table_name:
tables = [re.sub(f"^{schema}\\.", "", table) for table in tables]
return sorted(tables)
@classmethod
- def get_view_names(cls, inspector, schema):
+ def get_view_names(cls, inspector: Inspector, schema: Optional[str]) ->
List[str]:
+ """
+ Get all views from schema
+
+ :param inspector: SqlAlchemy inspector
+ :param schema: Schema name. If omitted, uses default schema for
database
+ :return: All views in schema
+ """
views = inspector.get_view_names(schema)
if schema and cls.try_remove_schema_from_table_name:
views = [re.sub(f"^{schema}\\.", "", view) for view in views]
return sorted(views)
@classmethod
- def get_columns(cls, inspector: Inspector, table_name: str, schema: str)
-> list:
+ def get_columns(
+ cls, inspector: Inspector, table_name: str, schema: Optional[str]
+ ) -> List[Dict[str, Any]]:
+ """
+ Get all columns from a given schema and table
+
+ :param inspector: SqlAlchemy Inspector instance
+ :param table_name: Table name
+ :param schema: Schema name. If omitted, uses default schema for
database
+ :return: All columns in table
+ """
return inspector.get_columns(table_name, schema)
@classmethod
- def where_latest_partition(cls, table_name, schema, database, qry,
columns=None):
- return False
+ def where_latest_partition(
+ cls,
+ table_name: str,
+ schema: Optional[str],
+ database,
+ query: Select,
+ columns: Optional[List] = None,
+ ) -> Optional[Select]:
+ """
+ Add a where clause to a query to reference only the most recent
partition
+
+ :param table_name: Table name
+ :param schema: Schema name
+ :param database: Database instance
+ :param query: SqlAlchemy query
+ :param columns: List of TableColumns
+ :return: SqlAlchemy query with additional where clause referencing
latest
+ partition
+ """
+ # TODO: Fix circular import caused by importing Database, TableColumn
+ return None
@classmethod
def _get_fields(cls, cols):
@@ -425,20 +591,34 @@ class BaseEngineSpec(object):
@classmethod
def select_star(
cls,
- my_db,
- table_name,
- engine,
- schema=None,
- limit=100,
- show_cols=False,
- indent=True,
- latest_partition=True,
- cols=None,
- ):
+ database,
+ table_name: str,
+ engine: Engine,
+ schema: Optional[str] = None,
+ limit: int = 100,
+ show_cols: bool = False,
+ indent: bool = True,
+ latest_partition: bool = True,
+ cols: Optional[List[Dict[str, Any]]] = None,
+ ) -> str:
+ """
+ Generate a "SELECT * from [schema.]table_name" query with appropriate
limit.
+
+ :param database: Database instance
+ :param table_name: Table name
+    :param engine: SqlAlchemy Engine instance
+ :param schema: Schema
+ :param limit: limit to impose on query
+ :param show_cols: Show columns in query; otherwise use "*"
+ :param indent: Add indentation to query
+ :param latest_partition: Only query latest partition
+ :param cols: Columns to include in query
+ :return: SQL query
+ """
fields = "*"
cols = cols or []
if (show_cols or latest_partition) and not cols:
- cols = my_db.get_columns(table_name, schema)
+ cols = database.get_columns(table_name, schema)
if show_cols:
fields = cls._get_fields(cols)
@@ -454,75 +634,93 @@ class BaseEngineSpec(object):
qry = qry.limit(limit)
if latest_partition:
partition_query = cls.where_latest_partition(
- table_name, schema, my_db, qry, columns=cols
+ table_name, schema, database, qry, columns=cols
)
- if partition_query != False: # noqa
+ if partition_query is not None:
qry = partition_query
- sql = my_db.compile_sqla_query(qry)
+ sql = database.compile_sqla_query(qry)
if indent:
sql = sqlparse.format(sql, reindent=True)
return sql
@classmethod
- def modify_url_for_impersonation(cls, url, impersonate_user, username):
+ def modify_url_for_impersonation(cls, url, impersonate_user: bool,
username: str):
"""
Modify the SQL Alchemy URL object with the user to impersonate if
applicable.
:param url: SQLAlchemy URL object
- :param impersonate_user: Bool indicating if impersonation is enabled
+ :param impersonate_user: Flag indicating if impersonation is enabled
:param username: Effective username
"""
if impersonate_user is not None and username is not None:
url.username = username
@classmethod
- def get_configuration_for_impersonation(cls, uri, impersonate_user,
username):
+ def get_configuration_for_impersonation(
+ cls, uri: str, impersonate_user: bool, username: str
+ ) -> Dict[str, str]:
"""
Return a configuration dictionary that can be merged with other configs
that can set the correct properties for impersonating users
- :param uri: URI string
- :param impersonate_user: Bool indicating if impersonation is enabled
+
+ :param uri: URI
+ :param impersonate_user: Flag indicating if impersonation is enabled
:param username: Effective username
- :return: Dictionary with configs required for impersonation
+ :return: Configs required for impersonation
"""
return {}
@classmethod
- def execute(cls, cursor, query, **kwargs):
+ def execute(cls, cursor, query: str, **kwargs):
+ """
+ Execute a SQL query
+
+ :param cursor: Cursor instance
+ :param query: Query to execute
+ :param kwargs: kwargs to be passed to cursor.execute()
+ :return:
+ """
if cls.arraysize:
cursor.arraysize = cls.arraysize
cursor.execute(query)
@classmethod
- def make_label_compatible(cls, label):
+ def make_label_compatible(cls, label: str) -> Union[str, quoted_name]:
"""
- Conditionally mutate and/or quote a sql column/expression label. If
+ Conditionally mutate and/or quote a sqlalchemy expression label. If
force_column_alias_quotes is set to True, return the label as a
sqlalchemy.sql.elements.quoted_name object to ensure that the select
query
and query results have same case. Otherwise return the mutated label
as a
regular string. If maximum supported column name length is exceeded,
generate a truncated label by calling truncate_label().
+
+ :param label: expected expression label/alias
+ :return: conditionally mutated label supported by the db engine
"""
- label_mutated = cls.mutate_label(label)
+ label_mutated = cls._mutate_label(label)
if (
cls.max_column_name_length
and len(label_mutated) > cls.max_column_name_length
):
- label_mutated = cls.truncate_label(label)
+ label_mutated = cls._truncate_label(label)
if cls.force_column_alias_quotes:
label_mutated = quoted_name(label_mutated, True)
return label_mutated
@classmethod
- def get_sqla_column_type(cls, type_):
+ def get_sqla_column_type(cls, type_: str) -> Optional[TypeEngine]:
"""
Return a sqlalchemy native column type that corresponds to the column
type
- defined in the data source (optional). Needs to be overridden if
column requires
- special handling (see MSSQL for example of NCHAR/NVARCHAR handling).
+ defined in the data source (return None to use default type inferred by
+ SQLAlchemy). Needs to be overridden if column requires special handling
+ (see MSSQL for example of NCHAR/NVARCHAR handling).
+
+ :param type_: Column type returned by inspector
+ :return: SqlAlchemy column type
"""
return None
@staticmethod
- def mutate_label(label):
+ def _mutate_label(label: str) -> str:
"""
Most engines support mixed case aliases that can include numbers
and special characters, like commas, parentheses etc. For engines that
@@ -531,15 +729,23 @@ class BaseEngineSpec(object):
limitations. Mutated labels should be deterministic (input label A
always
yields output label X) and unique (input labels A and B don't yield
the same
output label X).
+
+ :param label: Preferred expression label
+ :return: Conditionally mutated label
"""
return label
@classmethod
- def truncate_label(cls, label):
+ def _truncate_label(cls, label: str) -> str:
"""
In the case that a label exceeds the max length supported by the
engine,
this method is used to construct a deterministic and unique label
based on
- an md5 hash.
+ the original label. By default this returns an md5 hash of the
original label,
+ conditionally truncated if the length of the hash exceeds the max
column length
+ of the engine.
+
+ :param label: Expected expression label
+ :return: Truncated label
"""
label = hashlib.md5(label.encode("utf-8")).hexdigest()
# truncate hash if it exceeds max length
@@ -548,5 +754,15 @@ class BaseEngineSpec(object):
return label
@classmethod
- def column_datatype_to_string(cls, sqla_column_type, dialect):
+ def column_datatype_to_string(
+ cls, sqla_column_type: TypeEngine, dialect: Dialect
+ ) -> str:
+ """
+ Convert sqlalchemy column type to string representation. Can be
overridden to remove
+ unnecessary details, especially collation info (see mysql, mssql).
+
+ :param sqla_column_type: SqlAlchemy column type
+ :param dialect: Sqlalchemy dialect
+ :return: Compiled column type
+ """
return sqla_column_type.compile(dialect=dialect).upper()
diff --git a/superset/db_engine_specs/bigquery.py
b/superset/db_engine_specs/bigquery.py
index 23c1c9f..addf4fd 100644
--- a/superset/db_engine_specs/bigquery.py
+++ b/superset/db_engine_specs/bigquery.py
@@ -14,8 +14,10 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
+from datetime import datetime
import hashlib
import re
+from typing import Any, Dict, List, Tuple
import pandas as pd
from sqlalchemy import literal_column
@@ -56,28 +58,29 @@ class BigQueryEngineSpec(BaseEngineSpec):
}
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
tt = target_type.upper()
if tt == "DATE":
return "'{}'".format(dttm.strftime("%Y-%m-%d"))
return "'{}'".format(dttm.strftime("%Y-%m-%d %H:%M:%S"))
@classmethod
- def fetch_data(cls, cursor, limit):
+ def fetch_data(cls, cursor, limit: int) -> List[Tuple]:
data = super(BigQueryEngineSpec, cls).fetch_data(cursor, limit)
if data and type(data[0]).__name__ == "Row":
data = [r.values() for r in data]
return data
@staticmethod
- def mutate_label(label):
+ def _mutate_label(label: str) -> str:
"""
BigQuery field_name should start with a letter or underscore and
contain only
alphanumeric characters. Labels that start with a number are prefixed
with an
underscore. Any unsupported characters are replaced with underscores
and an
md5 hash is added to the end of the label to avoid possible collisions.
- :param str label: the original label which might include unsupported
characters
- :return: String that is supported by the database
+
+ :param label: Expected expression label
+ :return: Conditionally mutated label
"""
label_hashed = "_" + hashlib.md5(label.encode("utf-8")).hexdigest()
@@ -93,15 +96,20 @@ class BigQueryEngineSpec(BaseEngineSpec):
return label_mutated
@classmethod
- def truncate_label(cls, label):
+ def _truncate_label(cls, label: str) -> str:
"""BigQuery requires column names start with either a letter or
underscore. To make sure this is always the case, an underscore is
prefixed
- to the truncated label.
+ to the md5 hash of the original label.
+
+ :param label: expected expression label
+ :return: truncated label
"""
return "_" + hashlib.md5(label.encode("utf-8")).hexdigest()
@classmethod
- def extra_table_metadata(cls, database, table_name, schema_name):
+ def extra_table_metadata(
+ cls, database, table_name: str, schema_name: str
+ ) -> Dict[str, Any]:
indexes = database.get_indexes(table_name, schema_name)
if not indexes:
return {}
@@ -136,11 +144,11 @@ class BigQueryEngineSpec(BaseEngineSpec):
]
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
return "TIMESTAMP_SECONDS({col})"
@classmethod
- def epoch_ms_to_dttm(cls):
+ def epoch_ms_to_dttm(cls) -> str:
return "TIMESTAMP_MILLIS({col})"
@classmethod
diff --git a/superset/db_engine_specs/clickhouse.py
b/superset/db_engine_specs/clickhouse.py
index 35e7e66..1095e1f 100644
--- a/superset/db_engine_specs/clickhouse.py
+++ b/superset/db_engine_specs/clickhouse.py
@@ -15,6 +15,8 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
+from datetime import datetime
+
from superset.db_engine_specs.base import BaseEngineSpec
@@ -42,7 +44,7 @@ class ClickHouseEngineSpec(BaseEngineSpec):
}
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
tt = target_type.upper()
if tt == "DATE":
return "toDate('{}')".format(dttm.strftime("%Y-%m-%d"))
diff --git a/superset/db_engine_specs/db2.py b/superset/db_engine_specs/db2.py
index 2e70ff3..30822e3 100644
--- a/superset/db_engine_specs/db2.py
+++ b/superset/db_engine_specs/db2.py
@@ -15,6 +15,8 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
+from datetime import datetime
+
from superset.db_engine_specs.base import BaseEngineSpec, LimitMethod
@@ -48,9 +50,9 @@ class Db2EngineSpec(BaseEngineSpec):
}
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
return "(TIMESTAMP('1970-01-01', '00:00:00') + {col} SECONDS)"
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
return "'{}'".format(dttm.strftime("%Y-%m-%d-%H.%M.%S"))
diff --git a/superset/db_engine_specs/drill.py
b/superset/db_engine_specs/drill.py
index 4c23159..d82a25f 100644
--- a/superset/db_engine_specs/drill.py
+++ b/superset/db_engine_specs/drill.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
+from datetime import datetime
from urllib import parse
from superset.db_engine_specs.base import BaseEngineSpec
@@ -41,15 +42,15 @@ class DrillEngineSpec(BaseEngineSpec):
# Returns a function to convert a Unix timestamp in milliseconds to a date
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
return cls.epoch_ms_to_dttm().replace("{col}", "({col}*1000)")
@classmethod
- def epoch_ms_to_dttm(cls):
+ def epoch_ms_to_dttm(cls) -> str:
return "TO_DATE({col})"
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
tt = target_type.upper()
if tt == "DATE":
return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10])
diff --git a/superset/db_engine_specs/hive.py b/superset/db_engine_specs/hive.py
index 3668651..688d835 100644
--- a/superset/db_engine_specs/hive.py
+++ b/superset/db_engine_specs/hive.py
@@ -15,11 +15,12 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
+from datetime import datetime
import logging
import os
import re
import time
-from typing import List
+from typing import Any, Dict, List, Optional, Tuple
from urllib import parse
from sqlalchemy import Column
@@ -85,7 +86,7 @@ class HiveEngineSpec(PrestoEngineSpec):
return BaseEngineSpec.get_all_datasource_names(db, datasource_type)
@classmethod
- def fetch_data(cls, cursor, limit):
+ def fetch_data(cls, cursor, limit: int) -> List[Tuple]:
import pyhive
from TCLIService import ttypes
@@ -179,7 +180,7 @@ class HiveEngineSpec(PrestoEngineSpec):
engine.execute(sql)
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
tt = target_type.upper()
if tt == "DATE":
return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10])
@@ -291,8 +292,8 @@ class HiveEngineSpec(PrestoEngineSpec):
@classmethod
def get_columns(
- cls, inspector: Inspector, table_name: str, schema: str
- ) -> List[dict]:
+ cls, inspector: Inspector, table_name: str, schema: Optional[str]
+ ) -> List[Dict[str, Any]]:
return inspector.get_columns(table_name, schema)
@classmethod
@@ -332,7 +333,7 @@ class HiveEngineSpec(PrestoEngineSpec):
@classmethod
def select_star(
cls,
- my_db,
+ database,
table_name: str,
engine: Engine,
schema: str = None,
@@ -340,10 +341,10 @@ class HiveEngineSpec(PrestoEngineSpec):
show_cols: bool = False,
indent: bool = True,
latest_partition: bool = True,
- cols: List[dict] = [],
+ cols: Optional[List[Dict[str, Any]]] = None,
) -> str:
return BaseEngineSpec.select_star(
- my_db,
+ database,
table_name,
engine,
schema,
@@ -355,11 +356,11 @@ class HiveEngineSpec(PrestoEngineSpec):
)
@classmethod
- def modify_url_for_impersonation(cls, url, impersonate_user, username):
+ def modify_url_for_impersonation(cls, url, impersonate_user: bool,
username: str):
"""
Modify the SQL Alchemy URL object with the user to impersonate if
applicable.
:param url: SQLAlchemy URL object
- :param impersonate_user: Bool indicating if impersonation is enabled
+ :param impersonate_user: Flag indicating if impersonation is enabled
:param username: Effective username
"""
# Do nothing in the URL object since instead this should modify
@@ -367,14 +368,16 @@ class HiveEngineSpec(PrestoEngineSpec):
pass
@classmethod
- def get_configuration_for_impersonation(cls, uri, impersonate_user,
username):
+ def get_configuration_for_impersonation(
+ cls, uri: str, impersonate_user: bool, username: str
+ ) -> Dict[str, str]:
"""
Return a configuration dictionary that can be merged with other configs
that can set the correct properties for impersonating users
:param uri: URI string
- :param impersonate_user: Bool indicating if impersonation is enabled
+ :param impersonate_user: Flag indicating if impersonation is enabled
:param username: Effective username
- :return: Dictionary with configs required for impersonation
+ :return: Configs required for impersonation
"""
configuration = {}
url = make_url(uri)
@@ -392,6 +395,6 @@ class HiveEngineSpec(PrestoEngineSpec):
return configuration
@staticmethod
- def execute(cursor, query, async_=False):
+ def execute(cursor, query: str, async_: bool = False):
kwargs = {"async": async_}
cursor.execute(query, **kwargs)
diff --git a/superset/db_engine_specs/impala.py b/superset/db_engine_specs/impala.py
index f7db9a1..f2fa57a 100644
--- a/superset/db_engine_specs/impala.py
+++ b/superset/db_engine_specs/impala.py
@@ -15,6 +15,11 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
+from datetime import datetime
+from typing import List
+
+from sqlalchemy.engine.reflection import Inspector
+
from superset.db_engine_specs.base import BaseEngineSpec
@@ -35,18 +40,18 @@ class ImpalaEngineSpec(BaseEngineSpec):
}
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
return "from_unixtime({col})"
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
tt = target_type.upper()
if tt == "DATE":
return "'{}'".format(dttm.strftime("%Y-%m-%d"))
return "'{}'".format(dttm.strftime("%Y-%m-%d %H:%M:%S"))
@classmethod
- def get_schema_names(cls, inspector):
+ def get_schema_names(cls, inspector: Inspector) -> List[str]:
schemas = [
row[0]
for row in inspector.engine.execute("SHOW SCHEMAS")
diff --git a/superset/db_engine_specs/kylin.py b/superset/db_engine_specs/kylin.py
index 792ad31..b3ebe81 100644
--- a/superset/db_engine_specs/kylin.py
+++ b/superset/db_engine_specs/kylin.py
@@ -15,6 +15,8 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
+from datetime import datetime
+
from superset.db_engine_specs.base import BaseEngineSpec
@@ -38,7 +40,7 @@ class KylinEngineSpec(BaseEngineSpec):
}
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
tt = target_type.upper()
if tt == "DATE":
return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10])
diff --git a/superset/db_engine_specs/mssql.py b/superset/db_engine_specs/mssql.py
index 1751146..0c46851 100644
--- a/superset/db_engine_specs/mssql.py
+++ b/superset/db_engine_specs/mssql.py
@@ -15,9 +15,12 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
+from datetime import datetime
import re
+from typing import List, Optional, Tuple
-from sqlalchemy.types import String, UnicodeText
+from sqlalchemy.engine.interfaces import Dialect
+from sqlalchemy.types import String, TypeEngine, UnicodeText
from superset.db_engine_specs.base import BaseEngineSpec, LimitMethod
@@ -45,12 +48,12 @@ class MssqlEngineSpec(BaseEngineSpec):
}
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
return "CONVERT(DATETIME, '{}', 126)".format(dttm.isoformat())
@classmethod
- def fetch_data(cls, cursor, limit):
- data = super(MssqlEngineSpec, cls).fetch_data(cursor, limit)
+ def fetch_data(cls, cursor, limit: int) -> List[Tuple]:
+ data = super().fetch_data(cursor, limit)
if data and type(data[0]).__name__ == "Row":
data = [[elem for elem in r] for r in data]
return data
@@ -61,14 +64,16 @@ class MssqlEngineSpec(BaseEngineSpec):
]
@classmethod
- def get_sqla_column_type(cls, type_):
+ def get_sqla_column_type(cls, type_: str) -> Optional[TypeEngine]:
for sqla_type, regex in cls.column_types:
if regex.match(type_):
return sqla_type
return None
@classmethod
- def column_datatype_to_string(cls, sqla_column_type, dialect):
+ def column_datatype_to_string(
+ cls, sqla_column_type: TypeEngine, dialect: Dialect
+ ) -> str:
datatype = super().column_datatype_to_string(sqla_column_type, dialect)
# MSSQL returns long overflowing datatype
# as in 'VARCHAR(255) COLLATE SQL_LATIN1_GENERAL_CP1_CI_AS'
diff --git a/superset/db_engine_specs/mysql.py b/superset/db_engine_specs/mysql.py
index 37ecaee..3a48032 100644
--- a/superset/db_engine_specs/mysql.py
+++ b/superset/db_engine_specs/mysql.py
@@ -15,9 +15,13 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
-from typing import Dict
+from datetime import datetime
+from typing import Any, Dict, Optional
from urllib import parse
+from sqlalchemy.engine.interfaces import Dialect
+from sqlalchemy.types import TypeEngine
+
from superset.db_engine_specs.base import BaseEngineSpec
@@ -47,7 +51,7 @@ class MySQLEngineSpec(BaseEngineSpec):
type_code_map: Dict[int, str] = {} # loaded from get_datatype only if needed
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
if target_type.upper() in ("DATETIME", "DATE"):
return "STR_TO_DATE('{}', '%Y-%m-%d %H:%i:%s')".format(
dttm.strftime("%Y-%m-%d %H:%M:%S")
@@ -61,7 +65,7 @@ class MySQLEngineSpec(BaseEngineSpec):
return uri
@classmethod
- def get_datatype(cls, type_code):
+ def get_datatype(cls, type_code: Any) -> Optional[str]:
if not cls.type_code_map:
# only import and store if needed at least once
import MySQLdb # pylint: disable=import-error
@@ -75,9 +79,10 @@ class MySQLEngineSpec(BaseEngineSpec):
datatype = cls.type_code_map.get(type_code)
if datatype and isinstance(datatype, str) and len(datatype):
return datatype
+ return None
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
return "from_unixtime({col})"
@classmethod
@@ -92,7 +97,9 @@ class MySQLEngineSpec(BaseEngineSpec):
return message
@classmethod
- def column_datatype_to_string(cls, sqla_column_type, dialect):
+ def column_datatype_to_string(
+ cls, sqla_column_type: TypeEngine, dialect: Dialect
+ ) -> str:
datatype = super().column_datatype_to_string(sqla_column_type, dialect)
# MySQL dialect started returning long overflowing datatype
# as in 'VARCHAR(255) COLLATE UTF8MB4_GENERAL_CI'
diff --git a/superset/db_engine_specs/oracle.py b/superset/db_engine_specs/oracle.py
index 6f36324..eaa815c 100644
--- a/superset/db_engine_specs/oracle.py
+++ b/superset/db_engine_specs/oracle.py
@@ -15,6 +15,8 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
+from datetime import datetime
+
from superset.db_engine_specs.base import LimitMethod
from superset.db_engine_specs.postgres import PostgresBaseEngineSpec
@@ -38,7 +40,7 @@ class OracleEngineSpec(PostgresBaseEngineSpec):
}
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
return ("""TO_TIMESTAMP('{}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')""").format(
dttm.isoformat()
)
diff --git a/superset/db_engine_specs/pinot.py b/superset/db_engine_specs/pinot.py
index 132cb48..f4bb843 100644
--- a/superset/db_engine_specs/pinot.py
+++ b/superset/db_engine_specs/pinot.py
@@ -15,9 +15,9 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
-from typing import Dict, Optional
+from typing import Dict, List, Optional
-from sqlalchemy.sql.expression import ColumnClause
+from sqlalchemy.sql.expression import ColumnClause, ColumnElement
from superset.db_engine_specs.base import BaseEngineSpec, TimestampExpression
@@ -60,7 +60,9 @@ class PinotEngineSpec(BaseEngineSpec):
return TimestampExpression(time_expr, col)
@classmethod
- def make_select_compatible(cls, groupby_exprs, select_exprs):
+ def make_select_compatible(
+ cls, groupby_exprs: Dict[str, ColumnElement], select_exprs: List[ColumnElement]
+ ) -> List[ColumnElement]:
# Pinot does not want the group by expr's to appear in the select clause
select_sans_groupby = []
# We want identity and not equality, so doing the filtering manually
diff --git a/superset/db_engine_specs/postgres.py b/superset/db_engine_specs/postgres.py
index b8d0506..6bfe12d 100644
--- a/superset/db_engine_specs/postgres.py
+++ b/superset/db_engine_specs/postgres.py
@@ -15,6 +15,11 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
+from datetime import datetime
+from typing import List, Optional, Tuple
+
+from sqlalchemy.dialects.postgresql.base import PGInspector
+
from superset.db_engine_specs.base import BaseEngineSpec, LimitMethod
@@ -36,7 +41,7 @@ class PostgresBaseEngineSpec(BaseEngineSpec):
}
@classmethod
- def fetch_data(cls, cursor, limit):
+ def fetch_data(cls, cursor, limit: int) -> List[Tuple]:
if not cursor.description:
return []
if cls.limit_method == LimitMethod.FETCH_MANY:
@@ -44,11 +49,11 @@ class PostgresBaseEngineSpec(BaseEngineSpec):
return cursor.fetchall()
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
return "(timestamp 'epoch' + {col} * interval '1 second')"
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
return "'{}'".format(dttm.strftime("%Y-%m-%d %H:%M:%S"))
@@ -58,7 +63,9 @@ class PostgresEngineSpec(PostgresBaseEngineSpec):
try_remove_schema_from_table_name = False
@classmethod
- def get_table_names(cls, inspector, schema):
+ def get_table_names(
+ cls, inspector: PGInspector, schema: Optional[str]
+ ) -> List[str]:
"""Need to consider foreign tables for PostgreSQL"""
tables = inspector.get_table_names(schema)
tables.extend(inspector.get_foreign_table_names(schema))
diff --git a/superset/db_engine_specs/presto.py b/superset/db_engine_specs/presto.py
index ff03d54..86cbcb4 100644
--- a/superset/db_engine_specs/presto.py
+++ b/superset/db_engine_specs/presto.py
@@ -16,19 +16,20 @@
# under the License.
# pylint: disable=C,R,W
from collections import OrderedDict
+from datetime import datetime
from distutils.version import StrictVersion
import logging
import re
import textwrap
import time
-from typing import List, Set, Tuple
+from typing import Any, Dict, List, Optional, Set, Tuple
from urllib import parse
from sqlalchemy import Column, literal_column
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.engine.result import RowProxy
-from sqlalchemy.sql.expression import ColumnClause
+from sqlalchemy.sql.expression import ColumnClause, Select
from superset import is_feature_enabled
from superset.db_engine_specs.base import BaseEngineSpec
@@ -59,7 +60,7 @@ class PrestoEngineSpec(BaseEngineSpec):
}
@classmethod
- def get_view_names(cls, inspector, schema):
+ def get_view_names(cls, inspector: Inspector, schema: Optional[str]) -> List[str]:
"""Returns an empty list
get_table_names() function returns all table names and view names,
@@ -197,7 +198,7 @@ class PrestoEngineSpec(BaseEngineSpec):
@classmethod
def _show_columns(
- cls, inspector: Inspector, table_name: str, schema: str
+ cls, inspector: Inspector, table_name: str, schema: Optional[str]
) -> List[RowProxy]:
"""
Show presto column names
@@ -215,8 +216,8 @@ class PrestoEngineSpec(BaseEngineSpec):
@classmethod
def get_columns(
- cls, inspector: Inspector, table_name: str, schema: str
- ) -> List[dict]:
+ cls, inspector: Inspector, table_name: str, schema: Optional[str]
+ ) -> List[Dict[str, Any]]:
"""
Get columns from a Presto data source. This includes handling row and
array data types
@@ -338,7 +339,7 @@ class PrestoEngineSpec(BaseEngineSpec):
@classmethod
def select_star(
cls,
- my_db,
+ database,
table_name: str,
engine: Engine,
schema: str = None,
@@ -346,21 +347,22 @@ class PrestoEngineSpec(BaseEngineSpec):
show_cols: bool = False,
indent: bool = True,
latest_partition: bool = True,
- cols: List[dict] = [],
+ cols: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Include selecting properties of row objects. We cannot easily break arrays into
rows, so render the whole array in its own row and skip columns that correspond
to an array's contents.
"""
+ cols = cols or []
presto_cols = cols
if is_feature_enabled("PRESTO_EXPAND_DATA") and show_cols:
dot_regex = r"\.(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
presto_cols = [
col for col in presto_cols if not re.search(dot_regex, col["name"])
]
- return super(PrestoEngineSpec, cls).select_star(
- my_db,
+ return super().select_star(
+ database,
table_name,
engine,
schema,
@@ -384,7 +386,7 @@ class PrestoEngineSpec(BaseEngineSpec):
return uri
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
tt = target_type.upper()
if tt == "DATE":
return "from_iso8601_date('{}')".format(dttm.isoformat()[:10])
@@ -393,7 +395,7 @@ class PrestoEngineSpec(BaseEngineSpec):
return "'{}'".format(dttm.strftime("%Y-%m-%d %H:%M:%S"))
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
return "from_unixtime({col})"
@classmethod
@@ -796,7 +798,9 @@ class PrestoEngineSpec(BaseEngineSpec):
return all_columns, data, expanded_columns
@classmethod
- def extra_table_metadata(cls, database, table_name, schema_name):
+ def extra_table_metadata(
+ cls, database, table_name: str, schema_name: str
+ ) -> Dict[str, Any]:
indexes = database.get_indexes(table_name, schema_name)
if not indexes:
return {}
@@ -808,6 +812,7 @@ class PrestoEngineSpec(BaseEngineSpec):
col_names, latest_parts = cls.latest_partition(
table_name, schema_name, database, show_first=True
)
+ latest_parts = latest_parts or tuple([None] * len(col_names))
return {
"partitions": {
"cols": cols,
@@ -931,7 +936,14 @@ class PrestoEngineSpec(BaseEngineSpec):
return sql
@classmethod
- def where_latest_partition(cls, table_name, schema, database, qry, columns=None):
+ def where_latest_partition(
+ cls,
+ table_name: str,
+ schema: str,
+ database,
+ query: Select,
+ columns: Optional[List] = None,
+ ) -> Optional[Select]:
try:
col_names, values = cls.latest_partition(
table_name, schema, database, show_first=True
@@ -946,22 +958,23 @@ class PrestoEngineSpec(BaseEngineSpec):
column_names = {column.get("name") for column in columns or []}
for col_name, value in zip(col_names, values):
if col_name in column_names:
- qry = qry.where(Column(col_name) == value)
- return qry
+ query = query.where(Column(col_name) == value)
+ return query
@classmethod
def _latest_partition_from_df(cls, df):
if not df.empty:
return df.to_records(index=False)[0].item()
+ return None
@classmethod
- def latest_partition(cls, table_name, schema, database, show_first=False):
+ def latest_partition(
+ cls, table_name: str, schema: str, database, show_first: bool = False
+ ):
"""Returns col name and the latest (max) partition value for a table
:param table_name: the name of the table
- :type table_name: str
:param schema: schema / database / namespace
- :type schema: str
:param database: database query will be run against
:type database: models.Database
:param show_first: displays the value for the first partitioning key
@@ -969,7 +982,7 @@ class PrestoEngineSpec(BaseEngineSpec):
:type show_first: bool
>>> latest_partition('foo_table')
- ('ds', '2018-01-01')
+ (['ds'], ('2018-01-01',))
"""
indexes = database.get_indexes(table_name, schema)
if len(indexes[0]["column_names"]) < 1:
diff --git a/superset/db_engine_specs/redshift.py b/superset/db_engine_specs/redshift.py
index 88af7d9..af4dc54 100644
--- a/superset/db_engine_specs/redshift.py
+++ b/superset/db_engine_specs/redshift.py
@@ -23,10 +23,11 @@ class RedshiftEngineSpec(PostgresBaseEngineSpec):
max_column_name_length = 127
@staticmethod
- def mutate_label(label):
+ def _mutate_label(label: str) -> str:
"""
Redshift only supports lowercase column names and aliases.
- :param str label: Original label which might include uppercase letters
- :return: String that is supported by the database
+
+ :param label: Expected expression label
+ :return: Conditionally mutated label
"""
return label.lower()
diff --git a/superset/db_engine_specs/snowflake.py b/superset/db_engine_specs/snowflake.py
index 4ef0f5a..07d266d 100644
--- a/superset/db_engine_specs/snowflake.py
+++ b/superset/db_engine_specs/snowflake.py
@@ -56,9 +56,9 @@ class SnowflakeEngineSpec(PostgresBaseEngineSpec):
return uri
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
return "DATEADD(S, {col}, '1970-01-01')"
@classmethod
- def epoch_ms_to_dttm(cls):
+ def epoch_ms_to_dttm(cls) -> str:
return "DATEADD(MS, {col}, '1970-01-01')"
diff --git a/superset/db_engine_specs/sqlite.py b/superset/db_engine_specs/sqlite.py
index b6afbd9..58a2c67 100644
--- a/superset/db_engine_specs/sqlite.py
+++ b/superset/db_engine_specs/sqlite.py
@@ -15,8 +15,11 @@
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
+from datetime import datetime
from typing import List
+from sqlalchemy.engine.reflection import Inspector
+
from superset.db_engine_specs.base import BaseEngineSpec
from superset.utils import core as utils
@@ -38,7 +41,7 @@ class SqliteEngineSpec(BaseEngineSpec):
}
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
return "datetime({col}, 'unixepoch')"
@classmethod
@@ -69,13 +72,13 @@ class SqliteEngineSpec(BaseEngineSpec):
raise Exception(f"Unsupported datasource_type: {datasource_type}")
@classmethod
- def convert_dttm(cls, target_type, dttm):
+ def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
iso = dttm.isoformat().replace("T", " ")
if "." not in iso:
iso += ".000000"
return "'{}'".format(iso)
@classmethod
- def get_table_names(cls, inspector, schema):
+ def get_table_names(cls, inspector: Inspector, schema: str) -> List[str]:
"""Need to disregard the schema for Sqlite"""
return sorted(inspector.get_table_names())
diff --git a/superset/db_engine_specs/teradata.py b/superset/db_engine_specs/teradata.py
index d697378..ceba88c 100644
--- a/superset/db_engine_specs/teradata.py
+++ b/superset/db_engine_specs/teradata.py
@@ -37,7 +37,7 @@ class TeradataEngineSpec(BaseEngineSpec):
}
@classmethod
- def epoch_to_dttm(cls):
+ def epoch_to_dttm(cls) -> str:
return (
"CAST(((CAST(DATE '1970-01-01' + ({col} / 86400) AS TIMESTAMP(0) "
"AT 0)) AT 0) + (({col} MOD 86400) * INTERVAL '00:00:01' "
diff --git a/superset/sql_parse.py b/superset/sql_parse.py
index 0619ba1..069d37c 100644
--- a/superset/sql_parse.py
+++ b/superset/sql_parse.py
@@ -16,7 +16,7 @@
# under the License.
# pylint: disable=C,R,W
import logging
-from typing import Optional
+from typing import List, Optional, Set
import sqlparse
from sqlparse.sql import Identifier, IdentifierList, remove_quotes, Token, TokenList
@@ -31,10 +31,10 @@ CTE_PREFIX = "CTE__"
class ParsedQuery(object):
def __init__(self, sql_statement):
- self.sql = sql_statement
- self._table_names = set()
- self._alias_names = set()
- self._limit = None
+ self.sql: str = sql_statement
+ self._table_names: Set[str] = set()
+ self._alias_names: Set[str] = set()
+ self._limit: Optional[int] = None
logging.info("Parsing with sqlparse statement {}".format(self.sql))
self._parsed = sqlparse.parse(self.stripped())
@@ -44,27 +44,27 @@ class ParsedQuery(object):
self._table_names = self._table_names - self._alias_names
@property
- def tables(self):
+ def tables(self) -> Set[str]:
return self._table_names
@property
- def limit(self):
+ def limit(self) -> Optional[int]:
return self._limit
- def is_select(self):
+ def is_select(self) -> bool:
return self._parsed[0].get_type() == "SELECT"
- def is_explain(self):
+ def is_explain(self) -> bool:
return self.stripped().upper().startswith("EXPLAIN")
- def is_readonly(self):
+ def is_readonly(self) -> bool:
"""Pessimistic readonly, 100% sure statement won't mutate anything"""
return self.is_select() or self.is_explain()
- def stripped(self):
+ def stripped(self) -> str:
return self.sql.strip(" \t\n;")
- def get_statements(self):
+ def get_statements(self) -> List[str]:
"""Returns a list of SQL statements as strings, stripped"""
statements = []
for statement in self._parsed:
@@ -105,36 +105,39 @@ class ParsedQuery(object):
return None
@staticmethod
- def __is_identifier(token: Token):
+ def __is_identifier(token: Token) -> bool:
return isinstance(token, (IdentifierList, Identifier))
- def __process_tokenlist(self, tlist: TokenList):
+ def __process_tokenlist(self, token_list: TokenList):
+ """
+ Add table names to table set
+
+ :param token_list: TokenList to be processed
+ """
# exclude subselects
- if "(" not in str(tlist):
- table_name = self.__get_full_name(tlist)
+ if "(" not in str(token_list):
+ table_name = self.__get_full_name(token_list)
if table_name and not table_name.startswith(CTE_PREFIX):
self._table_names.add(table_name)
return
# store aliases
- if tlist.has_alias():
- self._alias_names.add(tlist.get_alias())
+ if token_list.has_alias():
+ self._alias_names.add(token_list.get_alias())
# some aliases are not parsed properly
- if tlist.tokens[0].ttype == Name:
- self._alias_names.add(tlist.tokens[0].value)
- self.__extract_from_token(tlist)
+ if token_list.tokens[0].ttype == Name:
+ self._alias_names.add(token_list.tokens[0].value)
+ self.__extract_from_token(token_list)
- def as_create_table(self, table_name, overwrite=False):
+ def as_create_table(self, table_name: str, overwrite: bool = False) -> str:
"""Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
- :param superset_query: string, sql query that will be executed
- :param table_name: string, will contain the results of the
- query execution
- :param overwrite, boolean, table table_name will be dropped if true
- :return: string, create table as query
+ :param table_name: Table that will contain the results of the query execution
+ :param overwrite: table_name will be dropped if true
+ :return: Create table as query
"""
exec_sql = ""
sql = self.stripped()
@@ -143,7 +146,12 @@ class ParsedQuery(object):
exec_sql += f"CREATE TABLE {table_name} AS \n{sql}"
return exec_sql
- def __extract_from_token(self, token, depth=0):
+ def __extract_from_token(self, token: Token):
+ """
+ Populate self._table_names from token
+
+ :param token: instance of Token or child class, e.g. TokenList, to be processed
+ """
if not hasattr(token, "tokens"):
return
@@ -151,7 +159,7 @@ class ParsedQuery(object):
for item in token.tokens:
if item.is_group and not self.__is_identifier(item):
- self.__extract_from_token(item, depth=depth + 1)
+ self.__extract_from_token(item)
if item.ttype in Keyword and (
item.normalized in PRECEDES_TABLE_NAME
@@ -174,9 +182,15 @@ class ParsedQuery(object):
elif isinstance(item, IdentifierList):
for token in item.tokens:
if not self.__is_identifier(token):
- self.__extract_from_token(item, depth=depth + 1)
+ self.__extract_from_token(item)
- def _extract_limit_from_query(self, statement):
+ def _extract_limit_from_query(self, statement: TokenList) -> Optional[int]:
+ """
+ Extract limit clause from SQL statement.
+
+ :param statement: SQL statement
+ :return: Limit extracted from query, None if no limit present in statement
+ """
idx, _ = statement.token_next_by(m=(Keyword, "LIMIT"))
if idx is not None:
_, token = statement.token_next(idx=idx)
@@ -188,10 +202,16 @@ class ParsedQuery(object):
_, token = token.token_next(idx=idx)
if token and token.ttype == sqlparse.tokens.Literal.Number.Integer:
return int(token.value)
+ return None
- def get_query_with_new_limit(self, new_limit):
- """returns the query with the specified limit"""
- """does not change the underlying query"""
+ def get_query_with_new_limit(self, new_limit: int) -> str:
+ """
+ returns the query with the specified limit.
+ Does not change the underlying query
+
+ :param new_limit: Limit to be incorporated into returned query
+ :return: The original query with new limit
+ """
if not self._limit:
return f"{self.stripped()}\nLIMIT {new_limit}"
limit_pos = None
diff --git a/tests/db_engine_specs_test.py b/tests/db_engine_specs_test.py
index 1b0d87c..3419d25 100644
--- a/tests/db_engine_specs_test.py
+++ b/tests/db_engine_specs_test.py
@@ -26,9 +26,9 @@ from sqlalchemy.types import String, UnicodeText
from superset.db_engine_specs import engines
from superset.db_engine_specs.base import (
+ _create_time_grains_tuple,
BaseEngineSpec,
builtin_time_grains,
- create_time_grains_tuple,
)
from superset.db_engine_specs.bigquery import BigQueryEngineSpec
from superset.db_engine_specs.hive import HiveEngineSpec
@@ -318,7 +318,7 @@ class DbEngineSpecsTestCase(SupersetTestCase):
blacklist = ["PT1M"]
time_grains = {"PT1S": "second", "PT1M": "minute"}
time_grain_functions = {"PT1S": "{col}", "PT1M": "{col}"}
- time_grains = create_time_grains_tuple(
+ time_grains = _create_time_grains_tuple(
time_grains, time_grain_functions, blacklist
)
self.assertEqual(1, len(time_grains))