This is an automated email from the ASF dual-hosted git repository.

maximebeauchemin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-superset.git


The following commit(s) were added to refs/heads/master by this push:
     new 607cfd1  Moving away from using the root logger everywhere (#9099)
607cfd1 is described below

commit 607cfd1f29736590fbba397c4f8a04526be66aff
Author: Craig Rueda <[email protected]>
AuthorDate: Fri Feb 7 23:38:48 2020 -0800

    Moving away from using the root logger everywhere (#9099)
    
    * Moving away from using the root logger everywhere
    
    * self.logger -> logger
---
 superset/cli.py                           | 24 +++++++-------
 superset/common/query_context.py          | 17 +++++-----
 superset/config.py                        |  7 ++--
 superset/connectors/druid/models.py       | 29 +++++++++--------
 superset/connectors/druid/views.py        |  4 ++-
 superset/connectors/sqla/models.py        | 13 ++++----
 superset/db_engine_specs/hive.py          | 15 +++++----
 superset/db_engine_specs/presto.py        |  9 +++---
 superset/models/core.py                   | 11 ++++---
 superset/models/dashboard.py              |  7 ++--
 superset/models/helpers.py                | 10 +++---
 superset/models/slice.py                  |  9 +++---
 superset/result_set.py                    |  6 ++--
 superset/security/manager.py              | 20 ++++++------
 superset/sql_parse.py                     |  3 +-
 superset/sql_validators/presto_db.py      |  9 +++---
 superset/stats_logger.py                  | 10 +++---
 superset/tasks/schedules.py               |  7 ++--
 superset/utils/core.py                    | 29 +++++++++--------
 superset/utils/dashboard_import_export.py |  4 ++-
 superset/utils/decorators.py              |  5 +--
 superset/utils/dict_import_export.py      | 13 ++++----
 superset/utils/import_datasource.py       |  8 +++--
 superset/utils/logging_configurator.py    |  4 ++-
 superset/views/base.py                    | 15 +++++----
 superset/views/base_api.py                |  8 ++---
 superset/views/core.py                    | 53 ++++++++++++++++---------------
 superset/viz.py                           | 17 +++++-----
 tests/core_tests.py                       |  4 ++-
 tests/email_tests.py                      |  7 ++--
 tests/viz_tests.py                        |  5 ++-
 31 files changed, 213 insertions(+), 169 deletions(-)

diff --git a/superset/cli.py b/superset/cli.py
index 13aa713..013a7a8 100755
--- a/superset/cli.py
+++ b/superset/cli.py
@@ -33,6 +33,8 @@ from superset.app import create_app
 from superset.extensions import celery_app, db
 from superset.utils import core as utils
 
+logger = logging.getLogger(__name__)
+
 
 @click.group(cls=FlaskGroup, create_app=create_app)
 @with_appcontext
@@ -180,7 +182,7 @@ def refresh_druid(datasource, merge):
             cluster.refresh_datasources(datasource_name=datasource, merge_flag=merge)
         except Exception as e:  # pylint: disable=broad-except
             print("Error while processing cluster '{}'\n{}".format(cluster, str(e)))
-            logging.exception(e)
+            logger.exception(e)
         cluster.metadata_last_refreshed = datetime.now()
         print("Refreshed metadata from cluster " "[" + cluster.cluster_name + "]")
     session.commit()
@@ -222,13 +224,13 @@ def import_dashboards(path, recursive, username):
     if username is not None:
         g.user = security_manager.find_user(username=username)
     for file_ in files:
-        logging.info("Importing dashboard from file %s", file_)
+        logger.info("Importing dashboard from file %s", file_)
         try:
             with file_.open() as data_stream:
                 dashboard_import_export.import_dashboards(db.session, data_stream)
         except Exception as e:  # pylint: disable=broad-except
-            logging.error("Error when importing dashboard from file %s", file_)
-            logging.error(e)
+            logger.error("Error when importing dashboard from file %s", file_)
+            logger.error(e)
 
 
 @superset.command()
@@ -247,7 +249,7 @@ def export_dashboards(print_stdout, dashboard_file):
     if print_stdout or not dashboard_file:
         print(data)
     if dashboard_file:
-        logging.info("Exporting dashboards to %s", dashboard_file)
+        logger.info("Exporting dashboards to %s", dashboard_file)
         with open(dashboard_file, "w") as data_stream:
             data_stream.write(data)
 
@@ -292,15 +294,15 @@ def import_datasources(path, sync, recursive):
         files.extend(path_object.rglob("*.yaml"))
         files.extend(path_object.rglob("*.yml"))
     for file_ in files:
-        logging.info("Importing datasources from file %s", file_)
+        logger.info("Importing datasources from file %s", file_)
         try:
             with file_.open() as data_stream:
                 dict_import_export.import_from_dict(
                     db.session, yaml.safe_load(data_stream), sync=sync_array
                 )
         except Exception as e:  # pylint: disable=broad-except
-            logging.error("Error when importing datasources from file %s", file_)
-            logging.error(e)
+            logger.error("Error when importing datasources from file %s", file_)
+            logger.error(e)
 
 
 @superset.command()
@@ -340,7 +342,7 @@ def export_datasources(
     if print_stdout or not datasource_file:
         yaml.safe_dump(data, stdout, default_flow_style=False)
     if datasource_file:
-        logging.info("Exporting datasources to %s", datasource_file)
+        logger.info("Exporting datasources to %s", datasource_file)
         with open(datasource_file, "w") as data_stream:
             yaml.safe_dump(data, data_stream, default_flow_style=False)
 
@@ -389,7 +391,7 @@ def update_datasources_cache():
 )
 def worker(workers):
     """Starts a Superset worker for async SQL query execution."""
-    logging.info(
+    logger.info(
         "The 'superset worker' command is deprecated. Please use the 'celery "
         "worker' command instead."
     )
@@ -424,7 +426,7 @@ def flower(port, address):
         f"--port={port} "
         f"--address={address} "
     )
-    logging.info(
+    logger.info(
         "The 'superset flower' command is deprecated. Please use the 'celery "
         "flower' command instead."
     )
diff --git a/superset/common/query_context.py b/superset/common/query_context.py
index 6c62f85..7c982f0 100644
--- a/superset/common/query_context.py
+++ b/superset/common/query_context.py
@@ -33,6 +33,7 @@ from .query_object import QueryObject
 
 config = app.config
 stats_logger: BaseStatsLogger = config["STATS_LOGGER"]
+logger = logging.getLogger(__name__)
 
 
 class QueryContext:
@@ -175,7 +176,7 @@ class QueryContext:
     ) -> Dict[str, Any]:
         """Handles caching around the df payload retrieval"""
         cache_key = self.cache_key(query_obj, **kwargs)
-        logging.info("Cache key: %s", cache_key)
+        logger.info("Cache key: %s", cache_key)
         is_loaded = False
         stacktrace = None
         df = pd.DataFrame()
@@ -196,11 +197,11 @@ class QueryContext:
                     is_loaded = True
                     stats_logger.incr("loaded_from_cache")
                 except Exception as e:  # pylint: disable=broad-except
-                    logging.exception(e)
-                    logging.error(
+                    logger.exception(e)
+                    logger.error(
                         "Error reading cache: %s", utils.error_msg_from_exception(e)
                     )
-                logging.info("Serving from cache")
+                logger.info("Serving from cache")
 
         if query_obj and not is_loaded:
             try:
@@ -213,7 +214,7 @@ class QueryContext:
                     stats_logger.incr("loaded_from_source")
                     is_loaded = True
             except Exception as e:  # pylint: disable=broad-except
-                logging.exception(e)
+                logger.exception(e)
                 if not error_message:
                     error_message = "{}".format(e)
                 status = utils.QueryStatus.FAILED
@@ -224,7 +225,7 @@ class QueryContext:
                     cache_value = dict(dttm=cached_dttm, df=df, query=query)
                     cache_binary = pkl.dumps(cache_value, protocol=pkl.HIGHEST_PROTOCOL)
 
-                    logging.info(
+                    logger.info(
                         "Caching %d chars at key %s", len(cache_binary), cache_key
                     )
 
@@ -233,8 +234,8 @@ class QueryContext:
                 except Exception as e:  # pylint: disable=broad-except
                     # cache.set call can fail if the backend is down or if
                     # the key is too large or whatever other reasons
-                    logging.warning("Could not cache key %s", cache_key)
-                    logging.exception(e)
+                    logger.warning("Could not cache key %s", cache_key)
+                    logger.exception(e)
                     cache.delete(cache_key)
         return {
             "cache_key": cache_key,
diff --git a/superset/config.py b/superset/config.py
index b46b99f..1ff7463 100644
--- a/superset/config.py
+++ b/superset/config.py
@@ -39,6 +39,9 @@ from superset.typing import CacheConfig
 from superset.utils.log import DBEventLogger
 from superset.utils.logging_configurator import DefaultLoggingConfigurator
 
+logger = logging.getLogger(__name__)
+
+
 # Realtime stats logger, a StatsD implementation exists
 STATS_LOGGER = DummyStatsLogger()
 EVENT_LOGGER = DBEventLogger()
@@ -773,7 +776,7 @@ if CONFIG_PATH_ENV_VAR in os.environ:
 
         print(f"Loaded your LOCAL configuration at [{cfg_path}]")
     except Exception:
-        logging.exception(
+        logger.exception(
             f"Failed to import config for {CONFIG_PATH_ENV_VAR}={cfg_path}"
         )
         raise
@@ -784,5 +787,5 @@ elif importlib.util.find_spec("superset_config"):
 
         print(f"Loaded your LOCAL configuration at [{superset_config.__file__}]")
     except Exception:
-        logging.exception("Found but failed to import local superset_config")
+        logger.exception("Found but failed to import local superset_config")
         raise
diff --git a/superset/connectors/druid/models.py b/superset/connectors/druid/models.py
index bc5bd7d..ca61a47 100644
--- a/superset/connectors/druid/models.py
+++ b/superset/connectors/druid/models.py
@@ -87,6 +87,7 @@ except ImportError:
 DRUID_TZ = conf.get("DRUID_TZ")
 POST_AGG_TYPE = "postagg"
 metadata = Model.metadata  # pylint: disable=no-member
+logger = logging.getLogger(__name__)
 
 try:
     # Postaggregator might not have been imported.
@@ -628,7 +629,7 @@ class DruidDatasource(Model, BaseDatasource):
 
     def latest_metadata(self):
         """Returns segment metadata from the latest segment"""
-        logging.info("Syncing datasource [{}]".format(self.datasource_name))
+        logger.info("Syncing datasource [{}]".format(self.datasource_name))
         client = self.cluster.get_pydruid_client()
         try:
             results = client.time_boundary(datasource=self.datasource_name)
@@ -657,8 +658,8 @@ class DruidDatasource(Model, BaseDatasource):
                 analysisTypes=[],
             )
         except Exception as e:
-            logging.warning("Failed first attempt to get latest segment")
-            logging.exception(e)
+            logger.warning("Failed first attempt to get latest segment")
+            logger.exception(e)
         if not segment_metadata:
             # if no segments in the past 7 days, look at all segments
             lbound = datetime(1901, 1, 1).isoformat()[:10]
@@ -674,8 +675,8 @@ class DruidDatasource(Model, BaseDatasource):
                     analysisTypes=[],
                 )
             except Exception as e:
-                logging.warning("Failed 2nd attempt to get latest segment")
-                logging.exception(e)
+                logger.warning("Failed 2nd attempt to get latest segment")
+                logger.exception(e)
         if segment_metadata:
             return segment_metadata[-1]["columns"]
 
@@ -963,7 +964,7 @@ class DruidDatasource(Model, BaseDatasource):
 
     def values_for_column(self, column_name: str, limit: int = 10000) -> List:
         """Retrieve some values for the given column"""
-        logging.info(
+        logger.info(
             "Getting values for columns [{}] limited to [{}]".format(column_name, limit)
         )
         # TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid
@@ -1223,12 +1224,12 @@ class DruidDatasource(Model, BaseDatasource):
             qry["limit"] = row_limit
             client.scan(**qry)
         elif len(groupby) == 0 and not having_filters:
-            logging.info("Running timeseries query for no groupby values")
+            logger.info("Running timeseries query for no groupby values")
             del qry["dimensions"]
             client.timeseries(**qry)
         elif not having_filters and len(groupby) == 1 and order_desc:
             dim = list(qry["dimensions"])[0]
-            logging.info("Running two-phase topn query for dimension [{}]".format(dim))
+            logger.info("Running two-phase topn query for dimension [{}]".format(dim))
             pre_qry = deepcopy(qry)
             if timeseries_limit_metric:
                 order_by = utils.get_metric_name(timeseries_limit_metric)
@@ -1253,7 +1254,7 @@ class DruidDatasource(Model, BaseDatasource):
             del pre_qry["dimensions"]
 
             client.topn(**pre_qry)
-            logging.info("Phase 1 Complete")
+            logger.info("Phase 1 Complete")
             if phase == 2:
                 query_str += "// Two phase query\n// Phase 1\n"
             query_str += json.dumps(
@@ -1276,13 +1277,13 @@ class DruidDatasource(Model, BaseDatasource):
             del qry["dimensions"]
             qry["metric"] = list(qry["aggregations"].keys())[0]
             client.topn(**qry)
-            logging.info("Phase 2 Complete")
+            logger.info("Phase 2 Complete")
         elif len(groupby) > 0 or having_filters:
             # If grouping on multiple fields or using a having filter
             # we have to force a groupby query
-            logging.info("Running groupby query for dimensions [{}]".format(dimensions))
+            logger.info("Running groupby query for dimensions [{}]".format(dimensions))
             if timeseries_limit and is_timeseries:
-                logging.info("Running two-phase query for timeseries")
+                logger.info("Running two-phase query for timeseries")
 
                 pre_qry = deepcopy(qry)
                 pre_qry_dims = self._dimensions_to_values(qry["dimensions"])
@@ -1324,7 +1325,7 @@ class DruidDatasource(Model, BaseDatasource):
                     "columns": [{"dimension": order_by, "direction": order_direction}],
                 }
                 client.groupby(**pre_qry)
-                logging.info("Phase 1 Complete")
+                logger.info("Phase 1 Complete")
                 query_str += "// Two phase query\n// Phase 1\n"
                 query_str += json.dumps(
                     client.query_builder.last_query.query_dict, indent=2
@@ -1357,7 +1358,7 @@ class DruidDatasource(Model, BaseDatasource):
                     ],
                 }
             client.groupby(**qry)
-            logging.info("Query Complete")
+            logger.info("Query Complete")
         query_str += json.dumps(client.query_builder.last_query.query_dict, indent=2)
         return query_str
 
diff --git a/superset/connectors/druid/views.py b/superset/connectors/druid/views.py
index 5a2cea0..231616c 100644
--- a/superset/connectors/druid/views.py
+++ b/superset/connectors/druid/views.py
@@ -45,6 +45,8 @@ from superset.views.base import (
 
 from . import models
 
+logger = logging.getLogger(__name__)
+
 
 class DruidColumnInlineView(CompactCRUDMixin, SupersetModelView):
     datamodel = SQLAInterface(models.DruidColumn)
@@ -380,7 +382,7 @@ class Druid(BaseSupersetView):
                     ),
                     "danger",
                 )
-                logging.exception(e)
+                logger.exception(e)
                 pass
             if valid_cluster:
                 cluster.metadata_last_refreshed = datetime.now()
diff --git a/superset/connectors/sqla/models.py b/superset/connectors/sqla/models.py
index d1bffee..f894496 100644
--- a/superset/connectors/sqla/models.py
+++ b/superset/connectors/sqla/models.py
@@ -62,6 +62,7 @@ from superset.utils import core as utils, import_datasource
 
 config = app.config
 metadata = Model.metadata  # pylint: disable=no-member
+logger = logging.getLogger(__name__)
 
 
 class SqlaQuery(NamedTuple):
@@ -98,7 +99,7 @@ class AnnotationDatasource(BaseDatasource):
         except Exception as e:
             df = pd.DataFrame()
             status = utils.QueryStatus.FAILED
-            logging.exception(e)
+            logger.exception(e)
             error_message = utils.error_msg_from_exception(e)
         return QueryResult(
             status=status, df=df, duration=0, query="", error_message=error_message
@@ -590,7 +591,7 @@ class SqlaTable(Model, BaseDatasource):
     def get_query_str_extended(self, query_obj: Dict[str, Any]) -> QueryStringExtended:
         sqlaq = self.get_sqla_query(**query_obj)
         sql = self.database.compile_sqla_query(sqlaq.sqla_query)
-        logging.info(sql)
+        logger.info(sql)
         sql = sqlparse.format(sql, reindent=True)
         sql = self.mutate_query_from_config(sql)
         return QueryStringExtended(
@@ -1005,7 +1006,7 @@ class SqlaTable(Model, BaseDatasource):
         except Exception as e:
             df = pd.DataFrame()
             status = utils.QueryStatus.FAILED
-            logging.exception(f"Query {sql} on schema {self.schema} failed")
+            logger.exception(f"Query {sql} on schema {self.schema} failed")
             db_engine_spec = self.database.db_engine_spec
             error_message = db_engine_spec.extract_error_message(e)
 
@@ -1025,7 +1026,7 @@ class SqlaTable(Model, BaseDatasource):
         try:
             table = self.get_sqla_table_object()
         except Exception as e:
-            logging.exception(e)
+            logger.exception(e)
             raise Exception(
                 _(
                     "Table [{}] doesn't seem to exist in the specified database, "
@@ -1052,8 +1053,8 @@ class SqlaTable(Model, BaseDatasource):
                 )
             except Exception as e:
                 datatype = "UNKNOWN"
-                logging.error("Unrecognized data type in {}.{}".format(table, col.name))
-                logging.exception(e)
+                logger.error("Unrecognized data type in {}.{}".format(table, col.name))
+                logger.exception(e)
             dbcol = dbcols.get(col.name, None)
             if not dbcol:
                 dbcol = TableColumn(column_name=col.name, type=datatype)
diff --git a/superset/db_engine_specs/hive.py b/superset/db_engine_specs/hive.py
index 35f5297..ac06bc4 100644
--- a/superset/db_engine_specs/hive.py
+++ b/superset/db_engine_specs/hive.py
@@ -39,6 +39,7 @@ if TYPE_CHECKING:
 
 QueryStatus = utils.QueryStatus
 config = app.config
+logger = logging.getLogger(__name__)
 
 tracking_url_trans = conf.get("TRACKING_URL_TRANSFORMER")
 hive_poll_interval = conf.get("HIVE_POLL_INTERVAL")
@@ -118,7 +119,7 @@ class HiveEngineSpec(PrestoEngineSpec):
         bucket_path = config["CSV_TO_HIVE_UPLOAD_S3_BUCKET"]
 
         if not bucket_path:
-            logging.info("No upload bucket specified")
+            logger.info("No upload bucket specified")
             raise Exception(
                 "No upload bucket specified. You can specify one in the config file."
             )
@@ -224,7 +225,7 @@ class HiveEngineSpec(PrestoEngineSpec):
                 map_progress = int(match.groupdict()["map_progress"])
                 reduce_progress = int(match.groupdict()["reduce_progress"])
                 stages[stage_number] = (map_progress + reduce_progress) / 2
-        logging.info(
+        logger.info(
             "Progress detail: {}, "  # pylint: disable=logging-format-interpolation
             "current job {}, "
             "total jobs: {}".format(stages, current_job, total_jobs)
@@ -267,7 +268,7 @@ class HiveEngineSpec(PrestoEngineSpec):
             if log:
                 log_lines = log.splitlines()
                 progress = cls.progress(log_lines)
-                logging.info(f"Query {query_id}: Progress total: {progress}")
+                logger.info(f"Query {query_id}: Progress total: {progress}")
                 needs_commit = False
                 if progress > query.progress:
                     query.progress = progress
@@ -276,22 +277,22 @@ class HiveEngineSpec(PrestoEngineSpec):
                     tracking_url = cls.get_tracking_url(log_lines)
                     if tracking_url:
                         job_id = tracking_url.split("/")[-2]
-                        logging.info(
+                        logger.info(
                             f"Query {query_id}: Found the tracking url: {tracking_url}"
                         )
                         tracking_url = tracking_url_trans(tracking_url)
-                        logging.info(
+                        logger.info(
                             f"Query {query_id}: Transformation applied: {tracking_url}"
                         )
                         query.tracking_url = tracking_url
-                        logging.info(f"Query {query_id}: Job id: {job_id}")
+                        logger.info(f"Query {query_id}: Job id: {job_id}")
                         needs_commit = True
                 if job_id and len(log_lines) > last_log_line:
                     # Wait for job id before logging things out
                     # this allows for prefixing all log lines and becoming
                     # searchable in something like Kibana
                     for l in log_lines[last_log_line:]:
-                        logging.info(f"Query {query_id}: [{job_id}] {l}")
+                        logger.info(f"Query {query_id}: [{job_id}] {l}")
                     last_log_line = len(log_lines)
                 if needs_commit:
                     session.commit()
diff --git a/superset/db_engine_specs/presto.py b/superset/db_engine_specs/presto.py
index 0da8dd5..3e4bda5 100644
--- a/superset/db_engine_specs/presto.py
+++ b/superset/db_engine_specs/presto.py
@@ -45,6 +45,7 @@ if TYPE_CHECKING:
 
 QueryStatus = utils.QueryStatus
 config = app.config
+logger = logging.getLogger(__name__)
 
 
 def get_children(column: Dict[str, str]) -> List[Dict[str, str]]:
@@ -334,7 +335,7 @@ class PrestoEngineSpec(BaseEngineSpec):
                 else:  # otherwise column is a basic data type
                     column_type = presto_type_map[column.Type]()
             except KeyError:
-                logging.info(
+                logger.info(
                     "Did not recognize type {} of column {}".format(  # pylint: disable=logging-format-interpolation
                         column.Type, column.Column
                     )
@@ -714,7 +715,7 @@ class PrestoEngineSpec(BaseEngineSpec):
     def handle_cursor(cls, cursor, query, session):
         """Updates progress information"""
         query_id = query.id
-        logging.info(f"Query {query_id}: Polling the cursor for progress")
+        logger.info(f"Query {query_id}: Polling the cursor for progress")
         polled = cursor.poll()
         # poll returns dict -- JSON status information or ``None``
         # if the query is done
@@ -740,7 +741,7 @@ class PrestoEngineSpec(BaseEngineSpec):
                 total_splits = float(stats.get("totalSplits"))
                 if total_splits and completed_splits:
                     progress = 100 * (completed_splits / total_splits)
-                    logging.info(
+                    logger.info(
                         "Query {} progress: {} / {} "  # pylint: disable=logging-format-interpolation
                         "splits".format(query_id, completed_splits, total_splits)
                     )
@@ -748,7 +749,7 @@ class PrestoEngineSpec(BaseEngineSpec):
                         query.progress = progress
                     session.commit()
             time.sleep(1)
-            logging.info(f"Query {query_id}: Polling the cursor for progress")
+            logger.info(f"Query {query_id}: Polling the cursor for progress")
             polled = cursor.poll()
 
     @classmethod
diff --git a/superset/models/core.py b/superset/models/core.py
index 3c5d370..3ad341b 100755
--- a/superset/models/core.py
+++ b/superset/models/core.py
@@ -63,6 +63,7 @@ custom_password_store = config["SQLALCHEMY_CUSTOM_PASSWORD_STORE"]
 stats_logger = config["STATS_LOGGER"]
 log_query = config["QUERY_LOGGER"]
 metadata = Model.metadata  # pylint: disable=no-member
+logger = logging.getLogger(__name__)
 
 PASSWORD_MASK = "X" * 10
 DB_CONNECTION_MUTATOR = config["DB_CONNECTION_MUTATOR"]
@@ -291,7 +292,7 @@ class Database(
         )
 
         masked_url = self.get_password_masked_url(sqlalchemy_url)
-        logging.info("Database.get_sqla_engine(). Masked URL: %s", str(masked_url))
+        logger.info("Database.get_sqla_engine(). Masked URL: %s", str(masked_url))
 
         params = extra.get("engine_params", {})
         if nullpool:
@@ -477,7 +478,7 @@ class Database(
                 utils.DatasourceName(table=table, schema=schema) for table in tables
             ]
         except Exception as e:  # pylint: disable=broad-except
-            logging.exception(e)
+            logger.exception(e)
 
     @cache_util.memoized_func(
         key=lambda *args, **kwargs: f"db:{{}}:schema:{kwargs.get('schema')}:view_list",  # type: ignore
@@ -507,7 +508,7 @@ class Database(
             )
             return [utils.DatasourceName(table=view, schema=schema) for view in views]
         except Exception as e:  # pylint: disable=broad-except
-            logging.exception(e)
+            logger.exception(e)
 
     @cache_util.memoized_func(
         key=lambda *args, **kwargs: "db:{}:schema_list", attribute_in_key="id"
@@ -554,7 +555,7 @@ class Database(
             try:
                 extra = json.loads(self.extra)
             except json.JSONDecodeError as e:
-                logging.error(e)
+                logger.error(e)
                 raise e
         return extra
 
@@ -564,7 +565,7 @@ class Database(
             try:
                 encrypted_extra = json.loads(self.encrypted_extra)
             except json.JSONDecodeError as e:
-                logging.error(e)
+                logger.error(e)
                 raise e
         return encrypted_extra
 
diff --git a/superset/models/dashboard.py b/superset/models/dashboard.py
index 88f215d..14d24b4 100644
--- a/superset/models/dashboard.py
+++ b/superset/models/dashboard.py
@@ -51,6 +51,7 @@ if TYPE_CHECKING:
 
 metadata = Model.metadata  # pylint: disable=no-member
 config = app.config
+logger = logging.getLogger(__name__)
 
 
 def copy_dashboard(mapper, connection, target):
@@ -277,11 +278,11 @@ class Dashboard(  # pylint: disable=too-many-instance-attributes
                         value["meta"]["chartId"] = old_to_new_slc_id_dict[old_slice_id]
             dashboard.position_json = json.dumps(position_data)
 
-        logging.info(
+        logger.info(
             "Started import of the dashboard: %s", dashboard_to_import.to_json()
         )
         session = db.session
-        logging.info("Dashboard has %d slices", len(dashboard_to_import.slices))
+        logger.info("Dashboard has %d slices", len(dashboard_to_import.slices))
         # copy slices object as Slice.import_slice will mutate the slice
         # and will remove the existing dashboard - slice association
         slices = copy(dashboard_to_import.slices)
@@ -297,7 +298,7 @@ class Dashboard(  # pylint: disable=too-many-instance-attributes
             if "remote_id" in slc.params_dict
         }
         for slc in slices:
-            logging.info(
+            logger.info(
                 "Importing slice %s from the dashboard: %s",
                 slc.to_json(),
                 dashboard_to_import.dashboard_title,
diff --git a/superset/models/helpers.py b/superset/models/helpers.py
index 086c4b5..b4d5edc 100644
--- a/superset/models/helpers.py
+++ b/superset/models/helpers.py
@@ -36,6 +36,8 @@ from sqlalchemy.orm.exc import MultipleResultsFound
 
 from superset.utils.core import QueryStatus
 
+logger = logging.getLogger(__name__)
+
 
 def json_to_dict(json_str):
     if json_str:
@@ -165,7 +167,7 @@ class ImportMixin:
             obj_query = session.query(cls).filter(and_(*filters))
             obj = obj_query.one_or_none()
         except MultipleResultsFound as e:
-            logging.error(
+            logger.error(
                 "Error importing %s \n %s \n %s",
                 cls.__name__,
                 str(obj_query),
@@ -177,13 +179,13 @@ class ImportMixin:
             is_new_obj = True
             # Create new DB object
             obj = cls(**dict_rep)
-            logging.info("Importing new %s %s", obj.__tablename__, str(obj))
+            logger.info("Importing new %s %s", obj.__tablename__, str(obj))
             if cls.export_parent and parent:
                 setattr(obj, cls.export_parent, parent)
             session.add(obj)
         else:
             is_new_obj = False
-            logging.info("Updating %s %s", obj.__tablename__, str(obj))
+            logger.info("Updating %s %s", obj.__tablename__, str(obj))
             # Update columns
             for k, v in dict_rep.items():
                 setattr(obj, k, v)
@@ -213,7 +215,7 @@ class ImportMixin:
                         
session.query(child_class).filter(and_(*delete_filters))
                     ).difference(set(added))
                     for o in to_delete:
-                        logging.info("Deleting %s %s", child, str(obj))
+                        logger.info("Deleting %s %s", child, str(obj))
                         session.delete(o)
 
         return obj
diff --git a/superset/models/slice.py b/superset/models/slice.py
index 4b54ada..46ebf30 100644
--- a/superset/models/slice.py
+++ b/superset/models/slice.py
@@ -45,6 +45,7 @@ slice_user = Table(
     Column("user_id", Integer, ForeignKey("ab_user.id")),
     Column("slice_id", Integer, ForeignKey("slices.id")),
 )
+logger = logging.getLogger(__name__)
 
 
 class Slice(
@@ -145,7 +146,7 @@ class Slice(
             d = self.viz.data
             self.token = d.get("token")  # type: ignore
         except Exception as e:  # pylint: disable=broad-except
-            logging.exception(e)
+            logger.exception(e)
             d["error"] = str(e)
         return {
             "cache_timeout": self.cache_timeout,
@@ -172,8 +173,8 @@ class Slice(
         try:
             form_data = json.loads(self.params)
         except Exception as e:  # pylint: disable=broad-except
-            logging.error("Malformed json in slice's params")
-            logging.exception(e)
+            logger.error("Malformed json in slice's params")
+            logger.exception(e)
         form_data.update(
             {
                 "slice_id": self.id,
@@ -292,7 +293,7 @@ class Slice(
             session.flush()
             return slc_to_override.id
         session.add(slc_to_import)
-        logging.info("Final slice: %s", str(slc_to_import.to_json()))
+        logger.info("Final slice: %s", str(slc_to_import.to_json()))
         session.flush()
         return slc_to_import.id
 
diff --git a/superset/result_set.py b/superset/result_set.py
index 5d7e378..29294d7 100644
--- a/superset/result_set.py
+++ b/superset/result_set.py
@@ -30,6 +30,8 @@ import pyarrow as pa
 from superset import db_engine_specs
 from superset.utils import core as utils
 
+logger = logging.getLogger(__name__)
+
 
 def dedup(l: List[str], suffix: str = "__", case_sensitive: bool = True) -> 
List[str]:
     """De-duplicates a list of string by suffixing a counter
@@ -134,7 +136,7 @@ class SupersetResultSet:
                                     series, type=pa.timestamp("ns", tz=tz)
                                 )
                         except Exception as e:
-                            logging.exception(e)
+                            logger.exception(e)
 
         self.table = pa.Table.from_arrays(pa_data, names=column_names)
         self._type_dict: Dict[str, Any] = {}
@@ -146,7 +148,7 @@ class SupersetResultSet:
                 if deduped_cursor_desc
             }
         except Exception as e:
-            logging.exception(e)
+            logger.exception(e)
 
     @staticmethod
     def convert_pa_dtype(pa_dtype: pa.DataType) -> Optional[str]:
diff --git a/superset/security/manager.py b/superset/security/manager.py
index 2d3f5e9..d9af4fc 100644
--- a/superset/security/manager.py
+++ b/superset/security/manager.py
@@ -51,6 +51,8 @@ if TYPE_CHECKING:
     from superset.models.core import Database
     from superset.viz import BaseViz
 
+logger = logging.getLogger(__name__)
+
 
 class SupersetSecurityListWidget(ListWidget):
     """
@@ -529,7 +531,7 @@ class SupersetSecurityManager(SecurityManager):
         :see: SecurityManager.add_permission_view_menu
         """
 
-        logging.warning(
+        logger.warning(
             "This method 'merge_perm' is deprecated use 
add_permission_view_menu"
         )
         self.add_permission_view_menu(permission_name, view_menu_name)
@@ -561,7 +563,7 @@ class SupersetSecurityManager(SecurityManager):
         from superset.connectors.base.models import BaseMetric
         from superset.models import core as models
 
-        logging.info("Fetching a set of all perms to lookup which ones are 
missing")
+        logger.info("Fetching a set of all perms to lookup which ones are 
missing")
         all_pvs = set()
         for pv in self.get_session.query(self.permissionview_model).all():
             if pv.permission and pv.view_menu:
@@ -572,18 +574,18 @@ class SupersetSecurityManager(SecurityManager):
             if view_menu and perm and (view_menu, perm) not in all_pvs:
                 self.add_permission_view_menu(view_menu, perm)
 
-        logging.info("Creating missing datasource permissions.")
+        logger.info("Creating missing datasource permissions.")
         datasources = ConnectorRegistry.get_all_datasources(db.session)
         for datasource in datasources:
             merge_pv("datasource_access", datasource.get_perm())
             merge_pv("schema_access", datasource.get_schema_perm())
 
-        logging.info("Creating missing database permissions.")
+        logger.info("Creating missing database permissions.")
         databases = db.session.query(models.Database).all()
         for database in databases:
             merge_pv("database_access", database.perm)
 
-        logging.info("Creating missing metrics permissions")
+        logger.info("Creating missing metrics permissions")
         metrics: List[BaseMetric] = []
         for datasource_class in ConnectorRegistry.sources.values():
             metrics += 
list(db.session.query(datasource_class.metric_class).all())
@@ -593,7 +595,7 @@ class SupersetSecurityManager(SecurityManager):
         Clean up the FAB faulty permissions.
         """
 
-        logging.info("Cleaning faulty perms")
+        logger.info("Cleaning faulty perms")
         sesh = self.get_session
         pvms = sesh.query(ab_models.PermissionView).filter(
             or_(
@@ -604,7 +606,7 @@ class SupersetSecurityManager(SecurityManager):
         deleted_count = pvms.delete()
         sesh.commit()
         if deleted_count:
-            logging.info("Deleted {} faulty permissions".format(deleted_count))
+            logger.info("Deleted {} faulty permissions".format(deleted_count))
 
     def sync_role_definitions(self) -> None:
         """
@@ -613,7 +615,7 @@ class SupersetSecurityManager(SecurityManager):
 
         from superset import conf
 
-        logging.info("Syncing role definition")
+        logger.info("Syncing role definition")
 
         self.create_custom_permissions()
 
@@ -641,7 +643,7 @@ class SupersetSecurityManager(SecurityManager):
         :param pvm_check: The FAB permission/view check
         """
 
-        logging.info("Syncing {} perms".format(role_name))
+        logger.info("Syncing {} perms".format(role_name))
         sesh = self.get_session
         pvms = sesh.query(ab_models.PermissionView).all()
         pvms = [p for p in pvms if p.permission and p.view_menu]
diff --git a/superset/sql_parse.py b/superset/sql_parse.py
index 3a1baab..5e16120 100644
--- a/superset/sql_parse.py
+++ b/superset/sql_parse.py
@@ -26,6 +26,7 @@ RESULT_OPERATIONS = {"UNION", "INTERSECT", "EXCEPT", "SELECT"}
 ON_KEYWORD = "ON"
 PRECEDES_TABLE_NAME = {"FROM", "JOIN", "DESCRIBE", "WITH", "LEFT JOIN", "RIGHT 
JOIN"}
 CTE_PREFIX = "CTE__"
+logger = logging.getLogger(__name__)
 
 
 def _extract_limit_from_query(statement: TokenList) -> Optional[int]:
@@ -56,7 +57,7 @@ class ParsedQuery:
         self._alias_names: Set[str] = set()
         self._limit: Optional[int] = None
 
-        logging.info("Parsing with sqlparse statement %s", self.sql)
+        logger.info("Parsing with sqlparse statement %s", self.sql)
         self._parsed = sqlparse.parse(self.stripped())
         for statement in self._parsed:
             self.__extract_from_token(statement)
diff --git a/superset/sql_validators/presto_db.py 
b/superset/sql_validators/presto_db.py
index 76efc21..681e034 100644
--- a/superset/sql_validators/presto_db.py
+++ b/superset/sql_validators/presto_db.py
@@ -30,6 +30,7 @@ from superset.utils.core import sources
 MAX_ERROR_ROWS = 10
 
 config = app.config
+logger = logging.getLogger(__name__)
 
 
 class PrestoSQLValidationError(Exception):
@@ -70,7 +71,7 @@ class PrestoDBSQLValidator(BaseSQLValidator):
             db_engine_spec.execute(cursor, sql)
             polled = cursor.poll()
             while polled:
-                logging.info("polling presto for validation progress")
+                logger.info("polling presto for validation progress")
                 stats = polled.get("stats", {})
                 if stats:
                     state = stats.get("state")
@@ -136,7 +137,7 @@ class PrestoDBSQLValidator(BaseSQLValidator):
                 end_column=end_column,
             )
         except Exception as e:
-            logging.exception(f"Unexpected error running validation query: 
{e}")
+            logger.exception(f"Unexpected error running validation query: {e}")
             raise e
 
     @classmethod
@@ -154,7 +155,7 @@ class PrestoDBSQLValidator(BaseSQLValidator):
         parsed_query = ParsedQuery(sql)
         statements = parsed_query.get_statements()
 
-        logging.info(f"Validating {len(statements)} statement(s)")
+        logger.info(f"Validating {len(statements)} statement(s)")
         engine = database.get_sqla_engine(
             schema=schema,
             nullpool=True,
@@ -172,6 +173,6 @@ class PrestoDBSQLValidator(BaseSQLValidator):
                     )
                     if annotation:
                         annotations.append(annotation)
-        logging.debug(f"Validation found {len(annotations)} error(s)")
+        logger.debug(f"Validation found {len(annotations)} error(s)")
 
         return annotations
diff --git a/superset/stats_logger.py b/superset/stats_logger.py
index aca511a..758208a 100644
--- a/superset/stats_logger.py
+++ b/superset/stats_logger.py
@@ -18,6 +18,8 @@ import logging
 
 from colorama import Fore, Style
 
+logger = logging.getLogger(__name__)
+
 
 class BaseStatsLogger:
     """Base class for logging realtime events"""
@@ -48,18 +50,18 @@ class BaseStatsLogger:
 
 class DummyStatsLogger(BaseStatsLogger):
     def incr(self, key):
-        logging.debug(Fore.CYAN + "[stats_logger] (incr) " + key + 
Style.RESET_ALL)
+        logger.debug(Fore.CYAN + "[stats_logger] (incr) " + key + 
Style.RESET_ALL)
 
     def decr(self, key):
-        logging.debug((Fore.CYAN + "[stats_logger] (decr) " + key + 
Style.RESET_ALL))
+        logger.debug((Fore.CYAN + "[stats_logger] (decr) " + key + 
Style.RESET_ALL))
 
     def timing(self, key, value):
-        logging.debug(
+        logger.debug(
             (Fore.CYAN + f"[stats_logger] (timing) {key} | {value} " + 
Style.RESET_ALL)
         )
 
     def gauge(self, key):
-        logging.debug(
+        logger.debug(
             (Fore.CYAN + "[stats_logger] (gauge) " + f"{key}" + 
Style.RESET_ALL)
         )
 
diff --git a/superset/tasks/schedules.py b/superset/tasks/schedules.py
index de7ba5a..3889d02 100644
--- a/superset/tasks/schedules.py
+++ b/superset/tasks/schedules.py
@@ -49,7 +49,8 @@ from superset.utils.core import get_email_address_list, 
send_email_smtp
 
 # Globals
 config = app.config
-logging.getLogger("tasks.email_reports").setLevel(logging.INFO)
+logger = logging.getLogger("tasks.email_reports")
+logger.setLevel(logging.INFO)
 
 # Time in seconds, we will wait for the page to load and render
 PAGE_RENDER_WAIT = 30
@@ -359,7 +360,7 @@ def schedule_email_report(
 
     # The user may have disabled the schedule. If so, ignore this
     if not schedule or not schedule.active:
-        logging.info("Ignoring deactivated schedule")
+        logger.info("Ignoring deactivated schedule")
         return
 
     # TODO: Detach the schedule object from the db session
@@ -420,7 +421,7 @@ def schedule_hourly():
     """ Celery beat job meant to be invoked hourly """
 
     if not config["ENABLE_SCHEDULED_EMAIL_REPORTS"]:
-        logging.info("Scheduled email reports not enabled in config")
+        logger.info("Scheduled email reports not enabled in config")
         return
 
     resolution = config["EMAIL_REPORTS_CRON_RESOLUTION"] * 60
diff --git a/superset/utils/core.py b/superset/utils/core.py
index b927742..41daebc 100644
--- a/superset/utils/core.py
+++ b/superset/utils/core.py
@@ -65,6 +65,7 @@ except ImportError:
 
 
 logging.getLogger("MARKDOWN").setLevel(logging.INFO)
+logger = logging.getLogger(__name__)
 
 DTTM_ALIAS = "__timestamp"
 ADHOC_METRIC_EXPRESSION_TYPES = {"SIMPLE": "SIMPLE", "SQL": "SQL"}
@@ -99,9 +100,9 @@ def flasher(msg, severity=None):
         flash(msg, severity)
     except RuntimeError:
         if severity == "danger":
-            logging.error(msg)
+            logger.error(msg)
         else:
-            logging.info(msg)
+            logger.info(msg)
 
 
 class _memoized:
@@ -242,7 +243,7 @@ def parse_human_datetime(s):
                 parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0)
             dttm = dttm_from_timetuple(parsed_dttm.utctimetuple())
         except Exception as e:
-            logging.exception(e)
+            logger.exception(e)
             raise ValueError("Couldn't parse date string [{}]".format(s))
     return dttm
 
@@ -544,7 +545,7 @@ def validate_json(obj):
         try:
             json.loads(obj)
         except Exception as e:
-            logging.error(f"JSON is not valid {e}")
+            logger.error(f"JSON is not valid {e}")
             raise SupersetException("JSON is not valid")
 
 
@@ -568,7 +569,7 @@ class timeout:
         self.error_message = error_message
 
     def handle_timeout(self, signum, frame):
-        logging.error("Process timed out")
+        logger.error("Process timed out")
         raise SupersetTimeoutException(self.error_message)
 
     def __enter__(self):
@@ -576,15 +577,15 @@ class timeout:
             signal.signal(signal.SIGALRM, self.handle_timeout)
             signal.alarm(self.seconds)
         except ValueError as e:
-            logging.warning("timeout can't be used in the current context")
-            logging.exception(e)
+            logger.warning("timeout can't be used in the current context")
+            logger.exception(e)
 
     def __exit__(self, type, value, traceback):
         try:
             signal.alarm(0)
         except ValueError as e:
-            logging.warning("timeout can't be used in the current context")
-            logging.exception(e)
+            logger.warning("timeout can't be used in the current context")
+            logger.exception(e)
 
 
 def pessimistic_connection_handling(some_engine):
@@ -640,7 +641,7 @@ def notify_user_about_perm_udate(granter, user, role, 
datasource, tpl_name, conf
     msg = render_template(
         tpl_name, granter=granter, user=user, role=role, datasource=datasource
     )
-    logging.info(msg)
+    logger.info(msg)
     subject = __(
         "[Superset] Access to the datasource %(name)s was granted",
         name=datasource.full_name,
@@ -746,12 +747,12 @@ def send_MIME_email(e_from, e_to, mime_msg, config, 
dryrun=False):
             s.starttls()
         if SMTP_USER and SMTP_PASSWORD:
             s.login(SMTP_USER, SMTP_PASSWORD)
-        logging.info("Sent an email to " + str(e_to))
+        logger.info("Sent an email to " + str(e_to))
         s.sendmail(e_from, e_to, mime_msg.as_string())
         s.quit()
     else:
-        logging.info("Dryrun enabled, email notification content is below:")
-        logging.info(mime_msg.as_string())
+        logger.info("Dryrun enabled, email notification content is below:")
+        logger.info(mime_msg.as_string())
 
 
 def get_email_address_list(address_string: str) -> List[str]:
@@ -924,7 +925,7 @@ def get_or_create_db(database_name, sqlalchemy_uri, *args, 
**kwargs):
         
db.session.query(models.Database).filter_by(database_name=database_name).first()
     )
     if not database:
-        logging.info(f"Creating database reference for {database_name}")
+        logger.info(f"Creating database reference for {database_name}")
         database = models.Database(database_name=database_name, *args, 
**kwargs)
         db.session.add(database)
 
diff --git a/superset/utils/dashboard_import_export.py 
b/superset/utils/dashboard_import_export.py
index 7d8efe0..53100f8 100644
--- a/superset/utils/dashboard_import_export.py
+++ b/superset/utils/dashboard_import_export.py
@@ -24,6 +24,8 @@ from superset.connectors.sqla.models import SqlaTable, 
SqlMetric, TableColumn
 from superset.models.dashboard import Dashboard
 from superset.models.slice import Slice
 
+logger = logging.getLogger(__name__)
+
 
 def decode_dashboards(o):
     """
@@ -64,7 +66,7 @@ def import_dashboards(session, data_stream, import_time=None):
 
 def export_dashboards(session):
     """Returns all dashboards metadata as a json dump"""
-    logging.info("Starting export")
+    logger.info("Starting export")
     dashboards = session.query(Dashboard)
     dashboard_ids = []
     for dashboard in dashboards:
diff --git a/superset/utils/decorators.py b/superset/utils/decorators.py
index a44d3b1..c7d23ec 100644
--- a/superset/utils/decorators.py
+++ b/superset/utils/decorators.py
@@ -28,6 +28,7 @@ from superset.utils.dates import now_as_float
 # resource? Flask-Caching will cache forever, but for the HTTP header we need
 # to specify a "far future" date.
 FAR_FUTURE = 365 * 24 * 60 * 60  # 1 year in seconds
+logger = logging.getLogger(__name__)
 
 
 @contextmanager
@@ -81,7 +82,7 @@ def etag_cache(max_age, check_perms=bool):
                 except Exception:  # pylint: disable=broad-except
                     if app.debug:
                         raise
-                    logging.exception("Exception possibly due to cache 
backend.")
+                    logger.exception("Exception possibly due to cache 
backend.")
 
             # if no response was cached, compute it using the wrapped function
             if response is None:
@@ -103,7 +104,7 @@ def etag_cache(max_age, check_perms=bool):
                     except Exception:  # pylint: disable=broad-except
                         if app.debug:
                             raise
-                    logging.exception("Exception possibly due to cache 
backend.")
+                    logger.exception("Exception possibly due to cache 
backend.")
 
             return response.make_conditional(request)
 
diff --git a/superset/utils/dict_import_export.py 
b/superset/utils/dict_import_export.py
index a7eecdf..d7ede85 100644
--- a/superset/utils/dict_import_export.py
+++ b/superset/utils/dict_import_export.py
@@ -22,6 +22,7 @@ from superset.models.core import Database
 
 DATABASES_KEY = "databases"
 DRUID_CLUSTERS_KEY = "druid_clusters"
+logger = logging.getLogger(__name__)
 
 
 def export_schema_to_dict(back_references):
@@ -42,7 +43,7 @@ def export_schema_to_dict(back_references):
 
 def export_to_dict(session, recursive, back_references, include_defaults):
     """Exports databases and druid clusters to a dictionary"""
-    logging.info("Starting export")
+    logger.info("Starting export")
     dbs = session.query(Database)
     databases = [
         database.export_to_dict(
@@ -52,7 +53,7 @@ def export_to_dict(session, recursive, back_references, 
include_defaults):
         )
         for database in dbs
     ]
-    logging.info("Exported %d %s", len(databases), DATABASES_KEY)
+    logger.info("Exported %d %s", len(databases), DATABASES_KEY)
     cls = session.query(DruidCluster)
     clusters = [
         cluster.export_to_dict(
@@ -62,7 +63,7 @@ def export_to_dict(session, recursive, back_references, 
include_defaults):
         )
         for cluster in cls
     ]
-    logging.info("Exported %d %s", len(clusters), DRUID_CLUSTERS_KEY)
+    logger.info("Exported %d %s", len(clusters), DRUID_CLUSTERS_KEY)
     data = dict()
     if databases:
         data[DATABASES_KEY] = databases
@@ -74,15 +75,15 @@ def export_to_dict(session, recursive, back_references, 
include_defaults):
 def import_from_dict(session, data, sync=[]):
     """Imports databases and druid clusters from dictionary"""
     if isinstance(data, dict):
-        logging.info("Importing %d %s", len(data.get(DATABASES_KEY, [])), 
DATABASES_KEY)
+        logger.info("Importing %d %s", len(data.get(DATABASES_KEY, [])), 
DATABASES_KEY)
         for database in data.get(DATABASES_KEY, []):
             Database.import_from_dict(session, database, sync=sync)
 
-        logging.info(
+        logger.info(
             "Importing %d %s", len(data.get(DRUID_CLUSTERS_KEY, [])), 
DRUID_CLUSTERS_KEY
         )
         for datasource in data.get(DRUID_CLUSTERS_KEY, []):
             DruidCluster.import_from_dict(session, datasource, sync=sync)
         session.commit()
     else:
-        logging.info("Supplied object is not a dictionary.")
+        logger.info("Supplied object is not a dictionary.")
diff --git a/superset/utils/import_datasource.py 
b/superset/utils/import_datasource.py
index e155a1f..075dbe4 100644
--- a/superset/utils/import_datasource.py
+++ b/superset/utils/import_datasource.py
@@ -18,6 +18,8 @@ import logging
 
 from sqlalchemy.orm.session import make_transient
 
+logger = logging.getLogger(__name__)
+
 
 def import_datasource(
     session, i_datasource, lookup_database, lookup_datasource, import_time
@@ -29,7 +31,7 @@ def import_datasource(
      superset instances. Audit metadata isn't copies over.
     """
     make_transient(i_datasource)
-    logging.info("Started import of the datasource: %s", 
i_datasource.to_json())
+    logger.info("Started import of the datasource: %s", i_datasource.to_json())
 
     i_datasource.id = None
     i_datasource.database_id = lookup_database(i_datasource).id
@@ -49,7 +51,7 @@ def import_datasource(
     for metric in i_datasource.metrics:
         new_m = metric.copy()
         new_m.table_id = datasource.id
-        logging.info(
+        logger.info(
             "Importing metric %s from the datasource: %s",
             new_m.to_json(),
             i_datasource.full_name,
@@ -61,7 +63,7 @@ def import_datasource(
     for column in i_datasource.columns:
         new_c = column.copy()
         new_c.table_id = datasource.id
-        logging.info(
+        logger.info(
             "Importing column %s from the datasource: %s",
             new_c.to_json(),
             i_datasource.full_name,
diff --git a/superset/utils/logging_configurator.py 
b/superset/utils/logging_configurator.py
index a145bf9..396d35e 100644
--- a/superset/utils/logging_configurator.py
+++ b/superset/utils/logging_configurator.py
@@ -21,6 +21,8 @@ from logging.handlers import TimedRotatingFileHandler
 import flask.app
 import flask.config
 
+logger = logging.getLogger(__name__)
+
 
 # pylint: disable=too-few-public-methods
 class LoggingConfigurator(abc.ABC):
@@ -64,4 +66,4 @@ class DefaultLoggingConfigurator(LoggingConfigurator):
             )
             logging.getLogger().addHandler(handler)
 
-        logging.info("logging was configured successfully")
+        logger.info("logging was configured successfully")
diff --git a/superset/views/base.py b/superset/views/base.py
index 6b94bf0..75d6d54 100644
--- a/superset/views/base.py
+++ b/superset/views/base.py
@@ -51,6 +51,7 @@ FRONTEND_CONF_KEYS = (
     "SQLLAB_SAVE_WARNING_MESSAGE",
     "DISPLAY_MAX_ROW",
 )
+logger = logging.getLogger(__name__)
 
 
 def get_error_msg():
@@ -104,7 +105,7 @@ def api(f):
         try:
             return f(self, *args, **kwargs)
         except Exception as e:  # pylint: disable=broad-except
-            logging.exception(e)
+            logger.exception(e)
             return json_error_response(get_error_msg())
 
     return functools.update_wrapper(wraps, f)
@@ -121,20 +122,20 @@ def handle_api_exception(f):
         try:
             return f(self, *args, **kwargs)
         except SupersetSecurityException as e:
-            logging.exception(e)
+            logger.exception(e)
             return json_error_response(
                 utils.error_msg_from_exception(e), status=e.status, link=e.link
             )
         except SupersetException as e:
-            logging.exception(e)
+            logger.exception(e)
             return json_error_response(
                 utils.error_msg_from_exception(e), status=e.status
             )
         except HTTPException as e:
-            logging.exception(e)
+            logger.exception(e)
             return json_error_response(utils.error_msg_from_exception(e), 
status=e.code)
         except Exception as e:  # pylint: disable=broad-except
-            logging.exception(e)
+            logger.exception(e)
             return json_error_response(utils.error_msg_from_exception(e))
 
     return functools.update_wrapper(wraps, f)
@@ -173,7 +174,7 @@ def menu_data():
             )
         # when user object has no username
         except NameError as e:
-            logging.exception(e)
+            logger.exception(e)
 
         if logo_target_path.startswith("/"):
             root_path = f"/superset{logo_target_path}"
@@ -258,7 +259,7 @@ def validate_json(_form, field):
     try:
         json.loads(field.data)
     except Exception as e:
-        logging.exception(e)
+        logger.exception(e)
         raise Exception(_("json isn't valid"))
 
 
diff --git a/superset/views/base_api.py b/superset/views/base_api.py
index de5d928..c3099a2 100644
--- a/superset/views/base_api.py
+++ b/superset/views/base_api.py
@@ -27,6 +27,7 @@ from sqlalchemy.exc import SQLAlchemyError
 from superset.exceptions import SupersetSecurityException
 from superset.views.base import check_ownership
 
+logger = logging.getLogger(__name__)
 get_related_schema = {
     "type": "object",
     "properties": {
@@ -62,7 +63,6 @@ class BaseSupersetModelRestApi(ModelRestApi):
     Extends FAB's ModelResApi to implement specific superset generic 
functionality
     """
 
-    logger = logging.getLogger(__name__)
     method_permission_name = {
         "get_list": "list",
         "get": "show",
@@ -251,7 +251,7 @@ class BaseOwnedModelRestApi(BaseSupersetModelRestApi):
                 200, result=self.edit_model_schema.dump(item.data, 
many=False).data
             )
         except SQLAlchemyError as e:
-            self.logger.error(f"Error updating model 
{self.__class__.__name__}: {e}")
+            logger.error(f"Error updating model {self.__class__.__name__}: 
{e}")
             return self.response_422(message=str(e))
 
     @expose("/", methods=["POST"])
@@ -303,7 +303,7 @@ class BaseOwnedModelRestApi(BaseSupersetModelRestApi):
                 id=item.data.id,
             )
         except SQLAlchemyError as e:
-            self.logger.error(f"Error creating model 
{self.__class__.__name__}: {e}")
+            logger.error(f"Error creating model {self.__class__.__name__}: 
{e}")
             return self.response_422(message=str(e))
 
     @expose("/<pk>", methods=["DELETE"])
@@ -344,5 +344,5 @@ class BaseOwnedModelRestApi(BaseSupersetModelRestApi):
             self.datamodel.delete(item, raise_exception=True)
             return self.response(200, message="OK")
         except SQLAlchemyError as e:
-            self.logger.error(f"Error deleting model 
{self.__class__.__name__}: {e}")
+            logger.error(f"Error deleting model {self.__class__.__name__}: 
{e}")
             return self.response_422(message=str(e))
diff --git a/superset/views/core.py b/superset/views/core.py
index b0a234d..bbd50e3 100755
--- a/superset/views/core.py
+++ b/superset/views/core.py
@@ -123,6 +123,7 @@ SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = 
config["SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT"
 stats_logger = config["STATS_LOGGER"]
 DAR = DatasourceAccessRequest
 QueryStatus = utils.QueryStatus
+logger = logging.getLogger(__name__)
 
 DATABASE_KEYS = [
     "allow_csv_upload",
@@ -220,7 +221,7 @@ def check_slice_perms(self, slice_id):
 def _deserialize_results_payload(
     payload: Union[bytes, str], query, use_msgpack: Optional[bool] = False
 ) -> dict:
-    logging.debug(f"Deserializing from msgpack: {use_msgpack}")
+    logger.debug(f"Deserializing from msgpack: {use_msgpack}")
     if use_msgpack:
         with stats_timing(
             "sqllab.query.results_backend_msgpack_deserialize", stats_logger
@@ -607,7 +608,7 @@ class Superset(BaseSupersetView):
             if query_obj:
                 query = viz_obj.datasource.get_query_str(query_obj)
         except Exception as e:
-            logging.exception(e)
+            logger.exception(e)
             return json_error_response(e)
 
         if not query:
@@ -746,7 +747,7 @@ class Superset(BaseSupersetView):
                     "danger",
                 )
             except Exception as e:
-                logging.exception(e)
+                logger.exception(e)
                 flash(
                     _(
                         "An unknown error occurred. "
@@ -1341,7 +1342,7 @@ class Superset(BaseSupersetView):
                 conn.scalar(select([1]))
                 return json_success('"OK"')
         except Exception as e:
-            logging.exception(e)
+            logger.exception(e)
             return json_error_response(
                 "Connection failed!\n\n" f"The error message returned 
was:\n{e}", 400
             )
@@ -1642,7 +1643,7 @@ class Superset(BaseSupersetView):
                 )
                 obj.get_json()
             except Exception as e:
-                self.logger.exception("Failed to warm up cache")
+                logger.exception("Failed to warm up cache")
                 return json_error_response(utils.error_msg_from_exception(e))
         return json_success(
             json.dumps(
@@ -1686,7 +1687,7 @@ class Superset(BaseSupersetView):
     @expose("/dashboard/<dashboard_id>/published/", methods=("GET", "POST"))
     def publish(self, dashboard_id):
         """Gets and toggles published status on dashboards"""
-        logging.warning(
+        logger.warning(
             "This API endpoint is deprecated and will be removed in version 
1.0.0"
         )
         session = db.session()
@@ -1860,7 +1861,7 @@ class Superset(BaseSupersetView):
                 "Can't find User '%(name)s', please ask your admin " "to 
create one.",
                 name=user_name,
             )
-            logging.error(err_msg)
+            logger.error(err_msg)
             return json_error_response(err_msg)
         cluster = (
             db.session.query(DruidCluster)
@@ -1872,12 +1873,12 @@ class Superset(BaseSupersetView):
                 "Can't find DruidCluster with cluster_name = " "'%(name)s'",
                 name=cluster_name,
             )
-            logging.error(err_msg)
+            logger.error(err_msg)
             return json_error_response(err_msg)
         try:
             DruidDatasource.sync_to_db_from_config(druid_config, user, cluster)
         except Exception as e:
-            logging.exception(utils.error_msg_from_exception(e))
+            logger.exception(utils.error_msg_from_exception(e))
             return json_error_response(utils.error_msg_from_exception(e))
         return Response(status=201)
 
@@ -1966,7 +1967,7 @@ class Superset(BaseSupersetView):
                     mydb, schema, sql, utils.sources.get("sql_lab")
                 )
         except SupersetTimeoutException as e:
-            logging.exception(e)
+            logger.exception(e)
             return json_error_response(timeout_msg)
         except Exception as e:
             return json_error_response(str(e))
@@ -2064,7 +2065,7 @@ class Superset(BaseSupersetView):
             QueryStatus.SUCCESS,
             QueryStatus.TIMED_OUT,
         ]:
-            logging.error(
+            logger.error(
                 f"Query with client_id {client_id} could not be stopped: query 
already complete"
             )
             return self.json_response("OK")
@@ -2128,7 +2129,7 @@ class Superset(BaseSupersetView):
             )
             return json_success(payload)
         except Exception as e:
-            logging.exception(e)
+            logger.exception(e)
             msg = _(
                 f"{validator.name} was unable to check your query.\n"
                 "Please recheck your query.\n"
@@ -2156,7 +2157,7 @@ class Superset(BaseSupersetView):
         :param query: The query (SQLAlchemy) object
         :return: String JSON response
         """
-        logging.info(f"Query {query.id}: Running query on a Celery worker")
+        logger.info(f"Query {query.id}: Running query on a Celery worker")
         # Ignore the celery future object and the request may time out.
         try:
             sql_lab.get_sql_results.delay(
@@ -2170,7 +2171,7 @@ class Superset(BaseSupersetView):
                 log_params=log_params,
             )
         except Exception as e:
-            logging.exception(f"Query {query.id}: {e}")
+            logger.exception(f"Query {query.id}: {e}")
             msg = _(
                 "Failed to start remote query on a worker. "
                 "Tell your administrator to verify the availability of "
@@ -2232,7 +2233,7 @@ class Superset(BaseSupersetView):
                 encoding=None,
             )
         except Exception as e:
-            logging.exception(f"Query {query.id}: {e}")
+            logger.exception(f"Query {query.id}: {e}")
             return json_error_response(f"{{e}}")
         if data.get("status") == QueryStatus.FAILED:
             return json_error_response(payload=data)
@@ -2260,7 +2261,7 @@ class Superset(BaseSupersetView):
                 query_params.get("templateParams") or "{}"
             )
         except json.JSONDecodeError:
-            logging.warning(
+            logger.warning(
                 f"Invalid template parameter 
{query_params.get('templateParams')}"
                 " specified. Defaulting to empty dict"
             )
@@ -2268,7 +2269,7 @@ class Superset(BaseSupersetView):
          limit: int = query_params.get("queryLimit") or app.config["SQL_MAX_ROW"]
         async_flag: bool = cast(bool, query_params.get("runAsync"))
         if limit < 0:
-            logging.warning(
+            logger.warning(
                 f"Invalid limit of {limit} specified. Defaulting to max limit."
             )
             limit = 0
@@ -2310,13 +2311,13 @@ class Superset(BaseSupersetView):
             query_id = query.id
             session.commit()  # shouldn't be necessary
         except SQLAlchemyError as e:
-            logging.error(f"Errors saving query details {e}")
+            logger.error(f"Errors saving query details {e}")
             session.rollback()
             raise Exception(_("Query record was not created as expected."))
         if not query_id:
             raise Exception(_("Query record was not created as expected."))
 
-        logging.info(f"Triggering query_id: {query_id}")
+        logger.info(f"Triggering query_id: {query_id}")
 
         rejected_tables = security_manager.rejected_tables(sql, mydb, schema)
         if rejected_tables:
@@ -2368,7 +2369,7 @@ class Superset(BaseSupersetView):
     @event_logger.log_this
     def csv(self, client_id):
         """Download the query results as csv."""
-        logging.info("Exporting CSV file [{}]".format(client_id))
+        logger.info("Exporting CSV file [{}]".format(client_id))
         query = db.session.query(Query).filter_by(client_id=client_id).one()
 
         rejected_tables = security_manager.rejected_tables(
@@ -2379,12 +2380,12 @@ class Superset(BaseSupersetView):
             return redirect("/")
         blob = None
         if results_backend and query.results_key:
-            logging.info(
+            logger.info(
                "Fetching CSV from results backend " "[{}]".format(query.results_key)
             )
             blob = results_backend.get(query.results_key)
         if blob:
-            logging.info("Decompressing")
+            logger.info("Decompressing")
             payload = utils.zlib_decompress(
                 blob, decode=not results_backend_use_msgpack
             )
@@ -2393,10 +2394,10 @@ class Superset(BaseSupersetView):
             )
             columns = [c["name"] for c in obj["columns"]]
             df = pd.DataFrame.from_records(obj["data"], columns=columns)
-            logging.info("Using pandas to convert to CSV")
+            logger.info("Using pandas to convert to CSV")
             csv = df.to_csv(index=False, **config["CSV_EXPORT"])
         else:
-            logging.info("Running a query to turn into CSV")
+            logger.info("Running a query to turn into CSV")
             sql = query.select_sql or query.executed_sql
             df = query.database.get_df(sql, query.schema)
             # TODO(bkyryliuk): add compression=gzip for big files.
@@ -2414,7 +2415,7 @@ class Superset(BaseSupersetView):
             "sql": query.sql,
             "exported_format": "csv",
         }
-        logging.info(
+        logger.info(
            f"CSV exported: {repr(event_info)}", extra={"superset_event": event_info}
         )
         return response
@@ -2697,7 +2698,7 @@ class Superset(BaseSupersetView):
             )
             return self.json_response(schemas_allowed_processed)
         except Exception as e:
-            self.logger.exception(e)
+            logger.exception(e)
             return json_error_response(
                "Failed to fetch schemas allowed for csv upload in this database! "
                 "Please contact your Superset Admin!"
diff --git a/superset/viz.py b/superset/viz.py
index 86abdc1..b6d89fb 100644
--- a/superset/viz.py
+++ b/superset/viz.py
@@ -66,6 +66,7 @@ config = app.config
 stats_logger = config["STATS_LOGGER"]
 relative_start = config["DEFAULT_RELATIVE_START_TIME"]
 relative_end = config["DEFAULT_RELATIVE_END_TIME"]
+logger = logging.getLogger(__name__)
 
 METRIC_KEYS = [
     "metric",
@@ -395,7 +396,7 @@ class BaseViz:
         if not query_obj:
             query_obj = self.query_obj()
         cache_key = self.cache_key(query_obj, **kwargs) if query_obj else None
-        logging.info("Cache key: {}".format(cache_key))
+        logger.info("Cache key: {}".format(cache_key))
         is_loaded = False
         stacktrace = None
         df = None
@@ -414,11 +415,11 @@ class BaseViz:
                     is_loaded = True
                     stats_logger.incr("loaded_from_cache")
                 except Exception as e:
-                    logging.exception(e)
-                    logging.error(
+                    logger.exception(e)
+                    logger.error(
                         "Error reading cache: " + utils.error_msg_from_exception(e)
                     )
-                logging.info("Serving from cache")
+                logger.info("Serving from cache")
 
         if query_obj and not is_loaded:
             try:
@@ -427,7 +428,7 @@ class BaseViz:
                     stats_logger.incr("loaded_from_source")
                     is_loaded = True
             except Exception as e:
-                logging.exception(e)
+                logger.exception(e)
                 if not self.error_message:
                     self.error_message = "{}".format(e)
                 self.status = utils.QueryStatus.FAILED
@@ -443,7 +444,7 @@ class BaseViz:
                     cache_value = dict(dttm=cached_dttm, df=df, query=self.query)
                     cache_value = pkl.dumps(cache_value, protocol=pkl.HIGHEST_PROTOCOL)
 
-                    logging.info(
+                    logger.info(
                         "Caching {} chars at key {}".format(len(cache_value), cache_key)
                     )
 
@@ -452,8 +453,8 @@ class BaseViz:
                 except Exception as e:
                     # cache.set call can fail if the backend is down or if
                     # the key is too large or whatever other reasons
-                    logging.warning("Could not cache key {}".format(cache_key))
-                    logging.exception(e)
+                    logger.warning("Could not cache key {}".format(cache_key))
+                    logger.exception(e)
                     cache.delete(cache_key)
         return {
             "cache_key": self._any_cache_key,
diff --git a/tests/core_tests.py b/tests/core_tests.py
index 061e61b..e51712b 100644
--- a/tests/core_tests.py
+++ b/tests/core_tests.py
@@ -54,6 +54,8 @@ from superset.views.database.views import DatabaseView
 
 from .base_tests import SupersetTestCase
 
+logger = logging.getLogger(__name__)
+
 
 class CoreTests(SupersetTestCase):
     def __init__(self, *args, **kwargs):
@@ -321,7 +323,7 @@ class CoreTests(SupersetTestCase):
                 (slc.slice_name, "explore_json", slc.explore_json_url),
             ]
         for name, method, url in urls:
-            logging.info(f"[{name}]/[{method}]: {url}")
+            logger.info(f"[{name}]/[{method}]: {url}")
             print(f"[{name}]/[{method}]: {url}")
             resp = self.client.get(url)
             self.assertEqual(resp.status_code, 200)
diff --git a/tests/email_tests.py b/tests/email_tests.py
index e8fd357..dfbbda0 100644
--- a/tests/email_tests.py
+++ b/tests/email_tests.py
@@ -31,6 +31,7 @@ from tests.base_tests import SupersetTestCase
 from .utils import read_fixture
 
 send_email_test = mock.Mock()
+logger = logging.getLogger(__name__)
 
 
 class EmailSmtpTest(SupersetTestCase):
@@ -47,7 +48,7 @@ class EmailSmtpTest(SupersetTestCase):
         )
         assert mock_send_mime.called
         call_args = mock_send_mime.call_args[0]
-        logging.debug(call_args)
+        logger.debug(call_args)
         assert call_args[0] == app.config["SMTP_MAIL_FROM"]
         assert call_args[1] == ["to"]
         msg = call_args[2]
@@ -64,7 +65,7 @@ class EmailSmtpTest(SupersetTestCase):
         )
         assert mock_send_mime.called
         call_args = mock_send_mime.call_args[0]
-        logging.debug(call_args)
+        logger.debug(call_args)
         assert call_args[0] == app.config["SMTP_MAIL_FROM"]
         assert call_args[1] == ["to"]
         msg = call_args[2]
@@ -82,7 +83,7 @@ class EmailSmtpTest(SupersetTestCase):
         )
         assert mock_send_mime.called
         call_args = mock_send_mime.call_args[0]
-        logging.debug(call_args)
+        logger.debug(call_args)
         assert call_args[0] == app.config["SMTP_MAIL_FROM"]
         assert call_args[1] == ["to"]
         msg = call_args[2]
diff --git a/tests/viz_tests.py b/tests/viz_tests.py
index 2b1752d..d5bf6f3 100644
--- a/tests/viz_tests.py
+++ b/tests/viz_tests.py
@@ -17,6 +17,7 @@
 # isort:skip_file
 import uuid
 from datetime import datetime
+import logging
 from math import nan
 from unittest.mock import Mock, patch
 
@@ -33,6 +34,8 @@ from superset.utils.core import DTTM_ALIAS
 from .base_tests import SupersetTestCase
 from .utils import load_fixture
 
+logger = logging.getLogger(__name__)
+
 
 class BaseVizTestCase(SupersetTestCase):
     def test_constructor_exception_no_datasource(self):
@@ -116,7 +119,7 @@ class BaseVizTestCase(SupersetTestCase):
         result = test_viz.get_df(query_obj)
         import logging
 
-        logging.info(result)
+        logger.info(result)
         pd.testing.assert_series_equal(
             result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
         )

Reply via email to