This is an automated email from the ASF dual-hosted git repository.
arivero pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/superset.git
The following commit(s) were added to refs/heads/master by this push:
new 4dfece9ee54 feat(mcp): add event_logger instrumentation to MCP tools (#37859)
4dfece9ee54 is described below
commit 4dfece9ee54f917d77cee1d69b5b15b617f32395
Author: Amin Ghadersohi <[email protected]>
AuthorDate: Thu Feb 12 10:50:20 2026 -0500
feat(mcp): add event_logger instrumentation to MCP tools (#37859)
---
UPDATING.md | 36 +++
superset/mcp_service/chart/tool/generate_chart.py | 270 +++++++++++----------
superset/mcp_service/chart/tool/get_chart_data.py | 80 +++---
superset/mcp_service/chart/tool/get_chart_info.py | 20 +-
.../mcp_service/chart/tool/get_chart_preview.py | 149 ++++++------
superset/mcp_service/chart/tool/list_charts.py | 27 ++-
superset/mcp_service/chart/tool/update_chart.py | 83 ++++---
.../mcp_service/chart/tool/update_chart_preview.py | 23 +-
.../tool/add_chart_to_existing_dashboard.py | 135 ++++++-----
.../dashboard/tool/generate_dashboard.py | 99 ++++----
.../dashboard/tool/get_dashboard_info.py | 20 +-
.../mcp_service/dashboard/tool/list_dashboards.py | 25 +-
.../mcp_service/dataset/tool/get_dataset_info.py | 20 +-
superset/mcp_service/dataset/tool/list_datasets.py | 28 ++-
.../explore/tool/generate_explore_link.py | 20 +-
superset/mcp_service/middleware.py | 118 +++++++--
superset/mcp_service/sql_lab/tool/execute_sql.py | 44 ++--
.../sql_lab/tool/open_sql_lab_with_context.py | 6 +-
.../mcp_service/system/tool/get_instance_info.py | 4 +-
superset/mcp_service/system/tool/get_schema.py | 6 +-
superset/mcp_service/system/tool/health_check.py | 8 +-
tests/unit_tests/extensions/test_types.py | 11 +-
.../mcp_service/test_middleware_logging.py | 207 ++++++++++++++++
23 files changed, 931 insertions(+), 508 deletions(-)
diff --git a/UPDATING.md b/UPDATING.md
index 7329e6c6aca..83e22ced382 100644
--- a/UPDATING.md
+++ b/UPDATING.md
@@ -24,6 +24,42 @@ assists people when migrating to a new version.
## Next
+### MCP Tool Observability
+
+MCP (Model Context Protocol) tools now include enhanced observability instrumentation for monitoring and debugging:
+
+**Two-layer instrumentation:**
+1. **Middleware layer** (`LoggingMiddleware`): Automatically logs all MCP tool calls with `duration_ms` and `success` status in the audit log (Action Log UI, logs table)
+2. **Sub-operation tracking**: All 19 MCP tools include granular `event_logger.log_context()` blocks for tracking individual operations like validation, database writes, and query execution
+
+**Action naming convention:**
+- Tool-level logs: `mcp_tool_call` (via middleware)
+- Sub-operation logs: `mcp.{tool_name}.{operation}` (e.g., `mcp.generate_chart.validation`, `mcp.execute_sql.query_execution`)
+
+**Querying MCP logs:**
+```sql
+-- Top slowest MCP operations
+SELECT action, COUNT(*) as calls, AVG(duration_ms) as avg_ms
+FROM logs
+WHERE action LIKE 'mcp.%'
+GROUP BY action
+ORDER BY avg_ms DESC
+LIMIT 20;
+
+-- MCP tool success rate
+SELECT
+ json_extract(curated_payload, '$.tool') as tool,
+ COUNT(*) as total_calls,
+  SUM(CASE WHEN json_extract(curated_payload, '$.success') = 'true' THEN 1 ELSE 0 END) as successful,
+  ROUND(100.0 * SUM(CASE WHEN json_extract(curated_payload, '$.success') = 'true' THEN 1 ELSE 0 END) / COUNT(*), 2) as success_rate
+FROM logs
+WHERE action = 'mcp_tool_call'
+GROUP BY tool
+ORDER BY total_calls DESC;
+```
+
+**Security note:** Sensitive parameters (passwords, API keys, tokens) are automatically redacted in logs as `[REDACTED]`.
+
### Signal Cache Backend
A new `SIGNAL_CACHE_CONFIG` configuration provides a unified Redis-based backend for real-time coordination features in Superset. This backend enables:
diff --git a/superset/mcp_service/chart/tool/generate_chart.py
b/superset/mcp_service/chart/tool/generate_chart.py
index ee555ca1d15..913a46bc6cd 100644
--- a/superset/mcp_service/chart/tool/generate_chart.py
+++ b/superset/mcp_service/chart/tool/generate_chart.py
@@ -25,6 +25,7 @@ from urllib.parse import parse_qs, urlparse
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.auth import has_dataset_access
from superset.mcp_service.chart.chart_utils import (
analyze_chart_capabilities,
@@ -132,23 +133,24 @@ async def generate_chart( # noqa: C901
await ctx.debug(
"Validating chart request: dataset_id=%s" % (request.dataset_id,)
)
- from superset.mcp_service.chart.validation import ValidationPipeline
+ with event_logger.log_context(action="mcp.generate_chart.validation"):
+ from superset.mcp_service.chart.validation import
ValidationPipeline
- validation_result = ValidationPipeline.validate_request_with_warnings(
- request.model_dump()
- )
+ validation_result =
ValidationPipeline.validate_request_with_warnings(
+ request.model_dump()
+ )
- if validation_result.is_valid and validation_result.request is not
None:
- # Use the validated request going forward
- request = validation_result.request
+ if validation_result.is_valid and validation_result.request is not
None:
+ # Use the validated request going forward
+ request = validation_result.request
- # Capture runtime warnings (informational, not blocking)
- if validation_result.warnings:
- runtime_warnings = validation_result.warnings.get("warnings", [])
- if runtime_warnings:
- await ctx.info(
- "Runtime suggestions: %s" % (";
".join(runtime_warnings[:3]),)
- )
+ # Capture runtime warnings (informational, not blocking)
+ if validation_result.warnings:
+ runtime_warnings = validation_result.warnings.get("warnings",
[])
+ if runtime_warnings:
+ await ctx.info(
+ "Runtime suggestions: %s" % (";
".join(runtime_warnings[:3]),)
+ )
if not validation_result.is_valid:
execution_time = int((time.time() - start_time) * 1000)
@@ -197,35 +199,38 @@ async def generate_chart( # noqa: C901
from superset.daos.dataset import DatasetDAO
await ctx.debug("Looking up dataset: dataset_id=%s" %
(request.dataset_id,))
- dataset = None
- if isinstance(request.dataset_id, int) or (
- isinstance(request.dataset_id, str) and
request.dataset_id.isdigit()
- ):
- dataset_id = (
- int(request.dataset_id)
- if isinstance(request.dataset_id, str)
- else request.dataset_id
- )
- dataset = DatasetDAO.find_by_id(dataset_id)
- # SECURITY FIX: Also validate permissions for numeric ID access
- if dataset and not has_dataset_access(dataset):
- logger.warning(
- "User %s attempted to access dataset %s without
permission",
- ctx.user.username if hasattr(ctx, "user") else
"unknown",
- dataset_id,
+ with
event_logger.log_context(action="mcp.generate_chart.dataset_lookup"):
+ dataset = None
+ if isinstance(request.dataset_id, int) or (
+ isinstance(request.dataset_id, str) and
request.dataset_id.isdigit()
+ ):
+ dataset_id = (
+ int(request.dataset_id)
+ if isinstance(request.dataset_id, str)
+ else request.dataset_id
)
- dataset = None # Treat as not found
- else:
- # SECURITY FIX: Try UUID lookup with permission validation
- dataset = DatasetDAO.find_by_id(request.dataset_id,
id_column="uuid")
- # Validate permissions for UUID-based access
- if dataset and not has_dataset_access(dataset):
- logger.warning(
- "User %s attempted access dataset %s via UUID",
- ctx.user.username if hasattr(ctx, "user") else
"unknown",
- request.dataset_id,
+ dataset = DatasetDAO.find_by_id(dataset_id)
+ # SECURITY FIX: Also validate permissions for numeric ID
access
+ if dataset and not has_dataset_access(dataset):
+ logger.warning(
+ "User %s attempted to access dataset %s without
permission",
+ ctx.user.username if hasattr(ctx, "user") else
"unknown",
+ dataset_id,
+ )
+ dataset = None # Treat as not found
+ else:
+ # SECURITY FIX: Try UUID lookup with permission validation
+ dataset = DatasetDAO.find_by_id(
+ request.dataset_id, id_column="uuid"
)
- dataset = None # Treat as not found
+ # Validate permissions for UUID-based access
+ if dataset and not has_dataset_access(dataset):
+ logger.warning(
+ "User %s attempted access dataset %s via UUID",
+ ctx.user.username if hasattr(ctx, "user") else
"unknown",
+ request.dataset_id,
+ )
+ dataset = None # Treat as not found
if not dataset:
await ctx.error(
@@ -267,22 +272,25 @@ async def generate_chart( # noqa: C901
)
try:
- command = CreateChartCommand(
- {
- "slice_name": chart_name,
- "viz_type": form_data["viz_type"],
- "datasource_id": dataset.id,
- "datasource_type": "table",
- "params": json.dumps(form_data),
- }
- )
+ with
event_logger.log_context(action="mcp.generate_chart.db_write"):
+ command = CreateChartCommand(
+ {
+ "slice_name": chart_name,
+ "viz_type": form_data["viz_type"],
+ "datasource_id": dataset.id,
+ "datasource_type": "table",
+ "params": json.dumps(form_data),
+ }
+ )
- chart = command.run()
- chart_id = chart.id
+ chart = command.run()
+ chart_id = chart.id
- # Ensure chart was created successfully before committing
- if not chart or not chart.id:
- raise Exception("Chart creation failed - no chart ID
returned")
+ # Ensure chart was created successfully before committing
+ if not chart or not chart.id:
+ raise RuntimeError(
+ "Chart creation failed - no chart ID returned"
+ )
await ctx.info(
"Chart created successfully: chart_id=%s, chart_name=%s"
@@ -301,35 +309,39 @@ async def generate_chart( # noqa: C901
# Generate form_data_key for saved charts (needed for chatbot
rendering)
try:
- from superset.commands.explore.form_data.parameters import (
- CommandParameters,
- )
- from superset.mcp_service.commands.create_form_data import (
- MCPCreateFormDataCommand,
- )
- from superset.utils.core import DatasourceType
+ with event_logger.log_context(
+ action="mcp.generate_chart.form_data_cache"
+ ):
+ from superset.commands.explore.form_data.parameters import
(
+ CommandParameters,
+ )
+ from superset.mcp_service.commands.create_form_data import
(
+ MCPCreateFormDataCommand,
+ )
+ from superset.utils.core import DatasourceType
- # Add datasource to form_data for the cache
- form_data_with_datasource = {
- **form_data,
- "datasource": f"{dataset.id}__table",
- }
+ # Add datasource to form_data for the cache
+ form_data_with_datasource = {
+ **form_data,
+ "datasource": f"{dataset.id}__table",
+ }
- cmd_params = CommandParameters(
- datasource_type=DatasourceType.TABLE,
- datasource_id=dataset.id,
- chart_id=chart.id,
- tab_id=None,
- form_data=json.dumps(form_data_with_datasource),
- )
- form_data_key = MCPCreateFormDataCommand(cmd_params).run()
- await ctx.debug(
- "Generated form_data_key for saved chart: form_data_key=%s"
- % (form_data_key,)
- )
+ cmd_params = CommandParameters(
+ datasource_type=DatasourceType.TABLE,
+ datasource_id=dataset.id,
+ chart_id=chart.id,
+ tab_id=None,
+ form_data=json.dumps(form_data_with_datasource),
+ )
+ form_data_key = MCPCreateFormDataCommand(cmd_params).run()
+ await ctx.debug(
+ "Generated form_data_key for saved chart: "
+ "form_data_key=%s" % (form_data_key,)
+ )
except Exception as fdk_error:
logger.warning(
- "Failed to generate form_data_key for saved chart: %s",
fdk_error
+ "Failed to generate form_data_key for saved chart: %s",
+ fdk_error,
)
await ctx.warning(
"Failed to generate form_data_key: error=%s" %
(str(fdk_error),)
@@ -383,60 +395,66 @@ async def generate_chart( # noqa: C901
"Generating previews: formats=%s" %
(str(request.preview_formats),)
)
try:
- for format_type in request.preview_formats:
- await ctx.debug(
- "Processing preview format: format=%s" % (format_type,)
- )
-
- if chart_id:
- # For saved charts, use the existing preview generation
- from superset.mcp_service.chart.tool.get_chart_preview
import (
- _get_chart_preview_internal,
- GetChartPreviewRequest,
+ with
event_logger.log_context(action="mcp.generate_chart.preview"):
+ for format_type in request.preview_formats:
+ await ctx.debug(
+ "Processing preview format: format=%s" %
(format_type,)
)
- preview_request = GetChartPreviewRequest(
- identifier=str(chart_id), format=format_type
- )
- preview_result = await _get_chart_preview_internal(
- preview_request, ctx
- )
+ if chart_id:
+ # For saved charts, use the existing preview
+ from
superset.mcp_service.chart.tool.get_chart_preview import ( # noqa: E501
+ _get_chart_preview_internal,
+ GetChartPreviewRequest,
+ )
- if hasattr(preview_result, "content"):
- previews[format_type] = preview_result.content
- else:
- # For preview-only mode (save_chart=false)
- # Note: Screenshot-based URL previews are not
supported.
- # Use the explore_url to view the chart interactively.
- if format_type in ["ascii", "table", "vega_lite"]:
- # Generate preview from form data without saved
chart
- from superset.mcp_service.chart.preview_utils
import (
- generate_preview_from_form_data,
+ preview_request = GetChartPreviewRequest(
+ identifier=str(chart_id), format=format_type
+ )
+ preview_result = await _get_chart_preview_internal(
+ preview_request, ctx
)
- # Convert dataset_id to int only if it's numeric
- if (
- isinstance(request.dataset_id, str)
- and request.dataset_id.isdigit()
- ):
- dataset_id_for_preview =
int(request.dataset_id)
- elif isinstance(request.dataset_id, int):
- dataset_id_for_preview = request.dataset_id
- else:
- # Skip preview generation for non-numeric
dataset IDs
- logger.warning(
- "Cannot generate preview for non-numeric "
+ if hasattr(preview_result, "content"):
+ previews[format_type] = preview_result.content
+ else:
+ # For preview-only mode (save_chart=false)
+ # Note: Screenshot-based URL previews are not
+ # supported. Use explore_url to view interactively.
+ if format_type in [
+ "ascii",
+ "table",
+ "vega_lite",
+ ]:
+ # Generate preview from form data
+ from superset.mcp_service.chart.preview_utils
import (
+ generate_preview_from_form_data,
)
- continue
- preview_result = generate_preview_from_form_data(
- form_data=form_data,
- dataset_id=dataset_id_for_preview,
- preview_format=format_type,
- )
+ # Convert dataset_id to int only if numeric
+ if (
+ isinstance(request.dataset_id, str)
+ and request.dataset_id.isdigit()
+ ):
+ dataset_id_for_preview =
int(request.dataset_id)
+ elif isinstance(request.dataset_id, int):
+ dataset_id_for_preview = request.dataset_id
+ else:
+ # Skip for non-numeric dataset IDs
+ logger.warning(
+ "Cannot generate preview for"
+ " non-numeric dataset IDs"
+ )
+ continue
+
+ preview_result =
generate_preview_from_form_data(
+ form_data=form_data,
+ dataset_id=dataset_id_for_preview,
+ preview_format=format_type,
+ )
- if not hasattr(preview_result, "error"):
- previews[format_type] = preview_result
+ if not hasattr(preview_result, "error"):
+ previews[format_type] = preview_result
except Exception as e:
# Log warning but don't fail the entire request
diff --git a/superset/mcp_service/chart/tool/get_chart_data.py
b/superset/mcp_service/chart/tool/get_chart_data.py
index 91d478b9a54..a99df31dd94 100644
--- a/superset/mcp_service/chart/tool/get_chart_data.py
+++ b/superset/mcp_service/chart/tool/get_chart_data.py
@@ -29,6 +29,7 @@ from superset_core.mcp import tool
if TYPE_CHECKING:
from superset.models.slice import Slice
+from superset.extensions import event_logger
from superset.mcp_service.chart.schemas import (
ChartData,
ChartError,
@@ -82,25 +83,27 @@ async def get_chart_data( # noqa: C901
from superset.utils import json as utils_json
# Find the chart
- chart = None
- if isinstance(request.identifier, int) or (
- isinstance(request.identifier, str) and
request.identifier.isdigit()
- ):
- chart_id = (
- int(request.identifier)
- if isinstance(request.identifier, str)
- else request.identifier
- )
- await ctx.debug(
- "Performing ID-based chart lookup: chart_id=%s" % (chart_id,)
- )
- chart = ChartDAO.find_by_id(chart_id)
- else:
- await ctx.debug(
- "Performing UUID-based chart lookup: uuid=%s" %
(request.identifier,)
- )
- # Try UUID lookup using DAO flexible method
- chart = ChartDAO.find_by_id(request.identifier, id_column="uuid")
+ with
event_logger.log_context(action="mcp.get_chart_data.chart_lookup"):
+ chart = None
+ if isinstance(request.identifier, int) or (
+ isinstance(request.identifier, str) and
request.identifier.isdigit()
+ ):
+ chart_id = (
+ int(request.identifier)
+ if isinstance(request.identifier, str)
+ else request.identifier
+ )
+ await ctx.debug(
+ "Performing ID-based chart lookup: chart_id=%s" %
(chart_id,)
+ )
+ chart = ChartDAO.find_by_id(chart_id)
+ else:
+ await ctx.debug(
+ "Performing UUID-based chart lookup: uuid=%s"
+ % (request.identifier,)
+ )
+ # Try UUID lookup using DAO flexible method
+ chart = ChartDAO.find_by_id(request.identifier,
id_column="uuid")
if not chart:
await ctx.error("Chart not found: identifier=%s" %
(request.identifier,))
@@ -232,8 +235,9 @@ async def get_chart_data( # noqa: C901
)
# Execute the query
- command = ChartDataCommand(query_context)
- result = command.run()
+ with
event_logger.log_context(action="mcp.get_chart_data.query_execution"):
+ command = ChartDataCommand(query_context)
+ result = command.run()
# Handle empty query results for certain chart types
if not result or ("queries" not in result) or
len(result["queries"]) == 0:
@@ -385,21 +389,27 @@ async def get_chart_data( # noqa: C901
# Handle different export formats
if request.format == "csv":
- return _export_data_as_csv(
- chart,
- data[: request.limit] if request.limit else data,
- raw_columns,
- cache_status,
- performance,
- )
+ with event_logger.log_context(
+ action="mcp.get_chart_data.format_conversion"
+ ):
+ return _export_data_as_csv(
+ chart,
+ data[: request.limit] if request.limit else data,
+ raw_columns,
+ cache_status,
+ performance,
+ )
elif request.format == "excel":
- return _export_data_as_excel(
- chart,
- data[: request.limit] if request.limit else data,
- raw_columns,
- cache_status,
- performance,
- )
+ with event_logger.log_context(
+ action="mcp.get_chart_data.format_conversion"
+ ):
+ return _export_data_as_excel(
+ chart,
+ data[: request.limit] if request.limit else data,
+ raw_columns,
+ cache_status,
+ performance,
+ )
await ctx.report_progress(4, 4, "Building response")
diff --git a/superset/mcp_service/chart/tool/get_chart_info.py
b/superset/mcp_service/chart/tool/get_chart_info.py
index 28407750178..d25354acd02 100644
--- a/superset/mcp_service/chart/tool/get_chart_info.py
+++ b/superset/mcp_service/chart/tool/get_chart_info.py
@@ -24,6 +24,7 @@ import logging
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.chart.schemas import (
ChartError,
ChartInfo,
@@ -71,16 +72,17 @@ async def get_chart_info(
"Retrieving chart information: identifier=%s" % (request.identifier,)
)
- tool = ModelGetInfoCore(
- dao_class=ChartDAO,
- output_schema=ChartInfo,
- error_schema=ChartError,
- serializer=serialize_chart_object,
- supports_slug=False, # Charts don't have slugs
- logger=logger,
- )
+ with event_logger.log_context(action="mcp.get_chart_info.lookup"):
+ tool = ModelGetInfoCore(
+ dao_class=ChartDAO,
+ output_schema=ChartInfo,
+ error_schema=ChartError,
+ serializer=serialize_chart_object,
+ supports_slug=False, # Charts don't have slugs
+ logger=logger,
+ )
- result = tool.run_tool(request.identifier)
+ result = tool.run_tool(request.identifier)
if isinstance(result, ChartInfo):
await ctx.info(
diff --git a/superset/mcp_service/chart/tool/get_chart_preview.py
b/superset/mcp_service/chart/tool/get_chart_preview.py
index fbc1a5802be..fcce93d99bd 100644
--- a/superset/mcp_service/chart/tool/get_chart_preview.py
+++ b/superset/mcp_service/chart/tool/get_chart_preview.py
@@ -25,6 +25,7 @@ from typing import Any, Dict, List, Protocol
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.chart.schemas import (
AccessibilityMetadata,
ASCIIPreview,
@@ -1807,66 +1808,72 @@ async def _get_chart_preview_internal( # noqa: C901
from superset.daos.chart import ChartDAO
# Find the chart
- chart: Any = None
- if isinstance(request.identifier, int) or (
- isinstance(request.identifier, str) and
request.identifier.isdigit()
- ):
- chart_id = (
- int(request.identifier)
- if isinstance(request.identifier, str)
- else request.identifier
- )
- await ctx.debug(
- "Performing ID-based chart lookup: chart_id=%s" % (chart_id,)
- )
- chart = ChartDAO.find_by_id(chart_id)
- else:
- await ctx.debug(
- "Performing UUID-based chart lookup: uuid=%s" %
(request.identifier,)
- )
- # Try UUID lookup using DAO flexible method
- chart = ChartDAO.find_by_id(request.identifier, id_column="uuid")
-
- # If not found and looks like a form_data_key, try to create
transient chart
- if (
- not chart
- and isinstance(request.identifier, str)
- and len(request.identifier) > 8
+ with
event_logger.log_context(action="mcp.get_chart_preview.chart_lookup"):
+ chart: Any = None
+ if isinstance(request.identifier, int) or (
+ isinstance(request.identifier, str) and
request.identifier.isdigit()
):
- # This might be a form_data_key, try to get form data from
cache
- from superset.commands.explore.form_data.get import
GetFormDataCommand
- from superset.commands.explore.form_data.parameters import (
- CommandParameters,
+ chart_id = (
+ int(request.identifier)
+ if isinstance(request.identifier, str)
+ else request.identifier
)
-
- try:
- cmd_params = CommandParameters(key=request.identifier)
- cmd = GetFormDataCommand(cmd_params)
- form_data_json = cmd.run()
- if form_data_json:
- from superset.utils import json as utils_json
-
- form_data = utils_json.loads(form_data_json)
-
- # Create a transient chart object from form data
- class TransientChart:
- def __init__(self, form_data: Dict[str, Any]):
- self.id = None
- self.slice_name = "Unsaved Chart Preview"
- self.viz_type = form_data.get("viz_type",
"table")
- self.datasource_id = None
- self.datasource_type = "table"
- self.params = utils_json.dumps(form_data)
- self.form_data = form_data
- self.uuid = None
-
- chart = TransientChart(form_data)
- except Exception as e:
- # Form data key not found or invalid
- logger.debug(
- "Failed to get form data for key %s: %s",
request.identifier, e
+ await ctx.debug(
+ "Performing ID-based chart lookup: chart_id=%s" %
(chart_id,)
+ )
+ chart = ChartDAO.find_by_id(chart_id)
+ else:
+ await ctx.debug(
+ "Performing UUID-based chart lookup: uuid=%s"
+ % (request.identifier,)
+ )
+ # Try UUID lookup using DAO flexible method
+ chart = ChartDAO.find_by_id(request.identifier,
id_column="uuid")
+
+ # If not found and looks like a form_data_key, try transient
+ if (
+ not chart
+ and isinstance(request.identifier, str)
+ and len(request.identifier) > 8
+ ):
+ # This might be a form_data_key
+ from superset.commands.explore.form_data.get import (
+ GetFormDataCommand,
+ )
+ from superset.commands.explore.form_data.parameters import
(
+ CommandParameters,
)
+ try:
+ cmd_params = CommandParameters(key=request.identifier)
+ cmd = GetFormDataCommand(cmd_params)
+ form_data_json = cmd.run()
+ if form_data_json:
+ from superset.utils import json as utils_json
+
+ form_data = utils_json.loads(form_data_json)
+
+ # Create a transient chart object from form data
+ class TransientChart:
+ def __init__(self, form_data: Dict[str, Any]):
+ self.id = None
+ self.slice_name = "Unsaved Chart Preview"
+ self.viz_type = form_data.get("viz_type",
"table")
+ self.datasource_id = None
+ self.datasource_type = "table"
+ self.params = utils_json.dumps(form_data)
+ self.form_data = form_data
+ self.uuid = None
+
+ chart = TransientChart(form_data)
+ except (ValueError, KeyError, AttributeError, TypeError)
as e:
+ # Form data key not found or invalid
+ logger.debug(
+ "Failed to get form data for key %s: %s",
+ request.identifier,
+ e,
+ )
+
if not chart:
await ctx.error("Chart not found: identifier=%s" %
(request.identifier,))
return ChartError(
@@ -1911,8 +1918,11 @@ async def _get_chart_preview_internal( # noqa: C901
)
# Handle different preview formats using strategy pattern
- preview_generator = PreviewFormatGenerator(chart, request)
- content = preview_generator.generate()
+ with event_logger.log_context(
+ action="mcp.get_chart_preview.preview_generation"
+ ):
+ preview_generator = PreviewFormatGenerator(chart, request)
+ content = preview_generator.generate()
if isinstance(content, ChartError):
await ctx.error(
@@ -1930,18 +1940,19 @@ async def _get_chart_preview_internal( # noqa: C901
await ctx.report_progress(3, 3, "Building response")
# Create performance and accessibility metadata
- execution_time = int((time.time() - start_time) * 1000)
- performance = PerformanceMetadata(
- query_duration_ms=execution_time,
- cache_status="miss",
- optimization_suggestions=[],
- )
+ with event_logger.log_context(action="mcp.get_chart_preview.metadata"):
+ execution_time = int((time.time() - start_time) * 1000)
+ performance = PerformanceMetadata(
+ query_duration_ms=execution_time,
+ cache_status="miss",
+ optimization_suggestions=[],
+ )
- accessibility = AccessibilityMetadata(
- color_blind_safe=True,
- alt_text=f"Preview of {chart.slice_name or f'Chart {chart.id}'}",
- high_contrast_available=False,
- )
+ accessibility = AccessibilityMetadata(
+ color_blind_safe=True,
+ alt_text=f"Preview of {chart.slice_name or f'Chart
{chart.id}'}",
+ high_contrast_available=False,
+ )
await ctx.debug(
"Preview generation completed: execution_time_ms=%s,
content_type=%s"
diff --git a/superset/mcp_service/chart/tool/list_charts.py
b/superset/mcp_service/chart/tool/list_charts.py
index 37ddbcfc503..7eccfc5f42e 100644
--- a/superset/mcp_service/chart/tool/list_charts.py
+++ b/superset/mcp_service/chart/tool/list_charts.py
@@ -28,6 +28,7 @@ from superset_core.mcp import tool
if TYPE_CHECKING:
from superset.models.slice import Slice
+from superset.extensions import event_logger
from superset.mcp_service.chart.schemas import (
ChartFilter,
ChartInfo,
@@ -121,15 +122,16 @@ async def list_charts(request: ListChartsRequest, ctx:
Context) -> ChartList:
)
try:
- result = tool.run_tool(
- filters=request.filters,
- search=request.search,
- select_columns=request.select_columns,
- order_column=request.order_column,
- order_direction=request.order_direction,
- page=max(request.page - 1, 0),
- page_size=request.page_size,
- )
+ with event_logger.log_context(action="mcp.list_charts.query"):
+ result = tool.run_tool(
+ filters=request.filters,
+ search=request.search,
+ select_columns=request.select_columns,
+ order_column=request.order_column,
+ order_direction=request.order_direction,
+ page=max(request.page - 1, 0),
+ page_size=request.page_size,
+ )
count = len(result.charts) if hasattr(result, "charts") else 0
total_pages = getattr(result, "total_pages", None)
await ctx.info(
@@ -145,9 +147,10 @@ async def list_charts(request: ListChartsRequest, ctx:
Context) -> ChartList:
"Applying field filtering via serialization context: columns=%s"
% (columns_to_filter,)
)
- return result.model_dump(
- mode="json", context={"select_columns": columns_to_filter}
- )
+ with event_logger.log_context(action="mcp.list_charts.serialization"):
+ return result.model_dump(
+ mode="json", context={"select_columns": columns_to_filter}
+ )
except Exception as e:
await ctx.error("Failed to list charts: %s" % (str(e),))
raise
diff --git a/superset/mcp_service/chart/tool/update_chart.py
b/superset/mcp_service/chart/tool/update_chart.py
index a1b720f380d..cf6e15b7356 100644
--- a/superset/mcp_service/chart/tool/update_chart.py
+++ b/superset/mcp_service/chart/tool/update_chart.py
@@ -25,6 +25,7 @@ import time
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.chart.chart_utils import (
analyze_chart_capabilities,
analyze_chart_semantics,
@@ -99,19 +100,20 @@ async def update_chart(
# Find the existing chart
from superset.daos.chart import ChartDAO
- chart = None
- if isinstance(request.identifier, int) or (
- isinstance(request.identifier, str) and
request.identifier.isdigit()
- ):
- chart_id = (
- int(request.identifier)
- if isinstance(request.identifier, str)
- else request.identifier
- )
- chart = ChartDAO.find_by_id(chart_id)
- else:
- # Try UUID lookup using DAO flexible method
- chart = ChartDAO.find_by_id(request.identifier, id_column="uuid")
+ with event_logger.log_context(action="mcp.update_chart.chart_lookup"):
+ chart = None
+ if isinstance(request.identifier, int) or (
+ isinstance(request.identifier, str) and
request.identifier.isdigit()
+ ):
+ chart_id = (
+ int(request.identifier)
+ if isinstance(request.identifier, str)
+ else request.identifier
+ )
+ chart = ChartDAO.find_by_id(chart_id)
+ else:
+ # Try UUID lookup using DAO flexible method
+ chart = ChartDAO.find_by_id(request.identifier,
id_column="uuid")
if not chart:
return GenerateChartResponse.model_validate(
@@ -132,21 +134,22 @@ async def update_chart(
# Update chart using Superset's command
from superset.commands.chart.update import UpdateChartCommand
- # Generate new chart name if provided, otherwise keep existing
- chart_name = (
- request.chart_name
- if request.chart_name
- else chart.slice_name or generate_chart_name(request.config)
- )
+ with event_logger.log_context(action="mcp.update_chart.db_write"):
+ # Generate new chart name if provided, otherwise keep existing
+ chart_name = (
+ request.chart_name
+ if request.chart_name
+ else chart.slice_name or generate_chart_name(request.config)
+ )
- update_payload = {
- "slice_name": chart_name,
- "viz_type": new_form_data["viz_type"],
- "params": json.dumps(new_form_data),
- }
+ update_payload = {
+ "slice_name": chart_name,
+ "viz_type": new_form_data["viz_type"],
+ "params": json.dumps(new_form_data),
+ }
- command = UpdateChartCommand(chart.id, update_payload)
- updated_chart = command.run()
+ command = UpdateChartCommand(chart.id, update_payload)
+ updated_chart = command.run()
# Generate semantic analysis
capabilities = analyze_chart_capabilities(updated_chart,
request.config)
@@ -176,21 +179,23 @@ async def update_chart(
previews = {}
if request.generate_preview:
try:
- from superset.mcp_service.chart.tool.get_chart_preview import (
- _get_chart_preview_internal,
- GetChartPreviewRequest,
- )
-
- for format_type in request.preview_formats:
- preview_request = GetChartPreviewRequest(
- identifier=str(updated_chart.id), format=format_type
- )
- preview_result = await _get_chart_preview_internal(
- preview_request, ctx
+ with
event_logger.log_context(action="mcp.update_chart.preview"):
+ from superset.mcp_service.chart.tool.get_chart_preview
import (
+ _get_chart_preview_internal,
+ GetChartPreviewRequest,
)
- if hasattr(preview_result, "content"):
- previews[format_type] = preview_result.content
+ for format_type in request.preview_formats:
+ preview_request = GetChartPreviewRequest(
+ identifier=str(updated_chart.id),
+ format=format_type,
+ )
+ preview_result = await _get_chart_preview_internal(
+ preview_request, ctx
+ )
+
+ if hasattr(preview_result, "content"):
+ previews[format_type] = preview_result.content
except Exception as e:
# Log warning but don't fail the entire request
diff --git a/superset/mcp_service/chart/tool/update_chart_preview.py
b/superset/mcp_service/chart/tool/update_chart_preview.py
index 16893fcfbd0..fd8dc680ff1 100644
--- a/superset/mcp_service/chart/tool/update_chart_preview.py
+++ b/superset/mcp_service/chart/tool/update_chart_preview.py
@@ -26,6 +26,7 @@ from typing import Any, Dict
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.chart.chart_utils import (
analyze_chart_capabilities,
analyze_chart_semantics,
@@ -65,23 +66,25 @@ def update_chart_preview(
start_time = time.time()
try:
- # Map the new config to form_data format
- # Pass dataset_id to enable column type checking for proper viz_type
selection
- new_form_data = map_config_to_form_data(
- request.config, dataset_id=request.dataset_id
- )
+ with
event_logger.log_context(action="mcp.update_chart_preview.form_data"):
+ # Map the new config to form_data format
+ # Pass dataset_id to enable column type checking
+ new_form_data = map_config_to_form_data(
+ request.config, dataset_id=request.dataset_id
+ )
- # Generate new explore link with updated form_data
- explore_url = generate_explore_link(request.dataset_id, new_form_data)
+ # Generate new explore link with updated form_data
+ explore_url = generate_explore_link(request.dataset_id,
new_form_data)
# Extract new form_data_key from the explore URL
new_form_data_key = None
if "form_data_key=" in explore_url:
new_form_data_key =
explore_url.split("form_data_key=")[1].split("&")[0]
- # Generate semantic analysis
- capabilities = analyze_chart_capabilities(None, request.config)
- semantics = analyze_chart_semantics(None, request.config)
+ with
event_logger.log_context(action="mcp.update_chart_preview.metadata"):
+ # Generate semantic analysis
+ capabilities = analyze_chart_capabilities(None, request.config)
+ semantics = analyze_chart_semantics(None, request.config)
# Create performance metadata
execution_time = int((time.time() - start_time) * 1000)
diff --git
a/superset/mcp_service/dashboard/tool/add_chart_to_existing_dashboard.py
b/superset/mcp_service/dashboard/tool/add_chart_to_existing_dashboard.py
index a201e22edc6..86275cfb3ce 100644
--- a/superset/mcp_service/dashboard/tool/add_chart_to_existing_dashboard.py
+++ b/superset/mcp_service/dashboard/tool/add_chart_to_existing_dashboard.py
@@ -27,6 +27,7 @@ from typing import Any, Dict
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.dashboard.schemas import (
AddChartToDashboardRequest,
AddChartToDashboardResponse,
@@ -147,75 +148,79 @@ def add_chart_to_existing_dashboard(
from superset.commands.dashboard.update import UpdateDashboardCommand
from superset.daos.dashboard import DashboardDAO
- # Validate dashboard exists
- dashboard = DashboardDAO.find_by_id(request.dashboard_id)
- if not dashboard:
- return AddChartToDashboardResponse(
- dashboard=None,
- dashboard_url=None,
- position=None,
- error=f"Dashboard with ID {request.dashboard_id} not found",
+ # Validate dashboard and chart exist
+ with
event_logger.log_context(action="mcp.add_chart_to_dashboard.validation"):
+ dashboard = DashboardDAO.find_by_id(request.dashboard_id)
+ if not dashboard:
+ return AddChartToDashboardResponse(
+ dashboard=None,
+ dashboard_url=None,
+ position=None,
+ error=(f"Dashboard with ID {request.dashboard_id} not
found"),
+ )
+
+ # Get chart object for SQLAlchemy relationships and validation
+ from superset import db
+ from superset.models.slice import Slice
+
+ new_chart = db.session.get(Slice, request.chart_id)
+ if not new_chart:
+ return AddChartToDashboardResponse(
+ dashboard=None,
+ dashboard_url=None,
+ position=None,
+ error=f"Chart with ID {request.chart_id} not found",
+ )
+
+ # Check if chart is already in dashboard
+ current_chart_ids = [slice.id for slice in dashboard.slices]
+ if request.chart_id in current_chart_ids:
+ return AddChartToDashboardResponse(
+ dashboard=None,
+ dashboard_url=None,
+ position=None,
+ error=(
+ f"Chart {request.chart_id} is already in dashboard "
+ f"{request.dashboard_id}"
+ ),
+ )
+
+ # Calculate layout position
+ with
event_logger.log_context(action="mcp.add_chart_to_dashboard.layout"):
+ # Parse current layout
+ try:
+ current_layout = json.loads(dashboard.position_json or "{}")
+ except (json.JSONDecodeError, TypeError):
+ current_layout = {}
+
+ # Find position for new chart
+ row_index = _find_next_row_position(current_layout)
+
+ # Add chart and row to layout
+ chart_key, row_key = _add_chart_to_layout(
+ current_layout, new_chart, request.chart_id, row_index
)
- # Get chart object for SQLAlchemy relationships and validation
- from superset import db
- from superset.models.slice import Slice
-
- new_chart = db.session.get(Slice, request.chart_id)
- if not new_chart:
- return AddChartToDashboardResponse(
- dashboard=None,
- dashboard_url=None,
- position=None,
- error=f"Chart with ID {request.chart_id} not found",
- )
-
- # Check if chart is already in dashboard
- current_chart_ids = [slice.id for slice in dashboard.slices]
- if request.chart_id in current_chart_ids:
- return AddChartToDashboardResponse(
- dashboard=None,
- dashboard_url=None,
- position=None,
- error=(
- f"Chart {request.chart_id} is already in dashboard "
- f"{request.dashboard_id}"
- ),
- )
-
- # Parse current layout
- try:
- current_layout = json.loads(dashboard.position_json or "{}")
- except (json.JSONDecodeError, TypeError):
- current_layout = {}
-
- # Find position for new chart
- row_index = _find_next_row_position(current_layout)
-
- # Add chart and row to layout
- chart_key, row_key = _add_chart_to_layout(
- current_layout, new_chart, request.chart_id, row_index
- )
-
- # Ensure proper layout structure
- _ensure_layout_structure(current_layout, row_key)
-
- # Get chart objects for SQLAlchemy relationships
- # Get existing chart objects
- existing_chart_objects = dashboard.slices
-
- # Combine existing and new chart objects (new_chart was retrieved
above)
- all_chart_objects = list(existing_chart_objects) + [new_chart]
-
- # Prepare update data
- update_data = {
- "position_json": json.dumps(current_layout),
- "slices": all_chart_objects, # Pass ORM objects, not IDs
- }
+ # Ensure proper layout structure
+ _ensure_layout_structure(current_layout, row_key)
# Update the dashboard
- command = UpdateDashboardCommand(request.dashboard_id, update_data)
- updated_dashboard = command.run()
+ with
event_logger.log_context(action="mcp.add_chart_to_dashboard.db_write"):
+ # Get existing chart objects
+ existing_chart_objects = dashboard.slices
+
+ # Combine existing and new chart objects
+ all_chart_objects = list(existing_chart_objects) + [new_chart]
+
+ # Prepare update data
+ update_data = {
+ "position_json": json.dumps(current_layout),
+ "slices": all_chart_objects, # Pass ORM objects, not IDs
+ }
+
+ # Update the dashboard
+ command = UpdateDashboardCommand(request.dashboard_id, update_data)
+ updated_dashboard = command.run()
# Convert to response format
from superset.mcp_service.dashboard.schemas import (
diff --git a/superset/mcp_service/dashboard/tool/generate_dashboard.py
b/superset/mcp_service/dashboard/tool/generate_dashboard.py
index c2b99703f3c..5d537ced2af 100644
--- a/superset/mcp_service/dashboard/tool/generate_dashboard.py
+++ b/superset/mcp_service/dashboard/tool/generate_dashboard.py
@@ -27,6 +27,7 @@ from typing import Any, Dict, List
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.dashboard.schemas import (
DashboardInfo,
GenerateDashboardRequest,
@@ -137,57 +138,63 @@ def generate_dashboard(
from superset.commands.dashboard.create import CreateDashboardCommand
from superset.models.slice import Slice
- chart_objects = (
-
db.session.query(Slice).filter(Slice.id.in_(request.chart_ids)).all()
- )
- found_chart_ids = [chart.id for chart in chart_objects]
-
- # Check if all requested charts were found
- missing_chart_ids = set(request.chart_ids) - set(found_chart_ids)
- if missing_chart_ids:
- return GenerateDashboardResponse(
- dashboard=None,
- dashboard_url=None,
- error=f"Charts not found: {list(missing_chart_ids)}",
+ with
event_logger.log_context(action="mcp.generate_dashboard.chart_validation"):
+ chart_objects = (
+
db.session.query(Slice).filter(Slice.id.in_(request.chart_ids)).all()
)
+ found_chart_ids = [chart.id for chart in chart_objects]
+
+ # Check if all requested charts were found
+ missing_chart_ids = set(request.chart_ids) - set(found_chart_ids)
+ if missing_chart_ids:
+ return GenerateDashboardResponse(
+ dashboard=None,
+ dashboard_url=None,
+ error=f"Charts not found: {list(missing_chart_ids)}",
+ )
# Create dashboard layout with chart objects
- layout = _create_dashboard_layout(chart_objects)
-
- # Prepare dashboard data
- dashboard_data = {
- "dashboard_title": request.dashboard_title,
- "slug": None, # Let Superset auto-generate slug
- "css": "",
- "json_metadata": json.dumps(
- {
- "filter_scopes": {},
- "expanded_slices": {},
- "refresh_frequency": 0,
- "timed_refresh_immune_slices": [],
- "color_scheme": None,
- "label_colors": {},
- "shared_label_colors": {},
- "color_scheme_domain": [],
- "cross_filters_enabled": False,
- "native_filter_configuration": [],
- "global_chart_configuration": {
- "scope": {"rootPath": ["ROOT_ID"], "excluded": []}
- },
- "chart_configuration": {},
- }
- ),
- "position_json": json.dumps(layout),
- "published": request.published,
- "slices": chart_objects, # Pass ORM objects, not IDs
- }
+ with event_logger.log_context(action="mcp.generate_dashboard.layout"):
+ layout = _create_dashboard_layout(chart_objects)
+
+ # Prepare dashboard data and create dashboard
+ with
event_logger.log_context(action="mcp.generate_dashboard.db_write"):
+ dashboard_data = {
+ "dashboard_title": request.dashboard_title,
+ "slug": None, # Let Superset auto-generate slug
+ "css": "",
+ "json_metadata": json.dumps(
+ {
+ "filter_scopes": {},
+ "expanded_slices": {},
+ "refresh_frequency": 0,
+ "timed_refresh_immune_slices": [],
+ "color_scheme": None,
+ "label_colors": {},
+ "shared_label_colors": {},
+ "color_scheme_domain": [],
+ "cross_filters_enabled": False,
+ "native_filter_configuration": [],
+ "global_chart_configuration": {
+ "scope": {
+ "rootPath": ["ROOT_ID"],
+ "excluded": [],
+ }
+ },
+ "chart_configuration": {},
+ }
+ ),
+ "position_json": json.dumps(layout),
+ "published": request.published,
+ "slices": chart_objects, # Pass ORM objects, not IDs
+ }
- if request.description:
- dashboard_data["description"] = request.description
+ if request.description:
+ dashboard_data["description"] = request.description
- # Create the dashboard using Superset's command pattern
- command = CreateDashboardCommand(dashboard_data)
- dashboard = command.run()
+ # Create the dashboard using Superset's command pattern
+ command = CreateDashboardCommand(dashboard_data)
+ dashboard = command.run()
# Convert to our response format
from superset.mcp_service.dashboard.schemas import (
diff --git a/superset/mcp_service/dashboard/tool/get_dashboard_info.py
b/superset/mcp_service/dashboard/tool/get_dashboard_info.py
index d67090973b8..d567c054949 100644
--- a/superset/mcp_service/dashboard/tool/get_dashboard_info.py
+++ b/superset/mcp_service/dashboard/tool/get_dashboard_info.py
@@ -28,6 +28,7 @@ from datetime import datetime, timezone
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.dashboard.schemas import (
dashboard_serializer,
DashboardError,
@@ -59,16 +60,17 @@ async def get_dashboard_info(
try:
from superset.daos.dashboard import DashboardDAO
- tool = ModelGetInfoCore(
- dao_class=DashboardDAO,
- output_schema=DashboardInfo,
- error_schema=DashboardError,
- serializer=dashboard_serializer,
- supports_slug=True, # Dashboards support slugs
- logger=logger,
- )
+ with event_logger.log_context(action="mcp.get_dashboard_info.lookup"):
+ tool = ModelGetInfoCore(
+ dao_class=DashboardDAO,
+ output_schema=DashboardInfo,
+ error_schema=DashboardError,
+ serializer=dashboard_serializer,
+ supports_slug=True, # Dashboards support slugs
+ logger=logger,
+ )
- result = tool.run_tool(request.identifier)
+ result = tool.run_tool(request.identifier)
if isinstance(result, DashboardInfo):
await ctx.info(
diff --git a/superset/mcp_service/dashboard/tool/list_dashboards.py
b/superset/mcp_service/dashboard/tool/list_dashboards.py
index db0a30da945..3d492567374 100644
--- a/superset/mcp_service/dashboard/tool/list_dashboards.py
+++ b/superset/mcp_service/dashboard/tool/list_dashboards.py
@@ -31,6 +31,7 @@ from superset_core.mcp import tool
if TYPE_CHECKING:
from superset.models.dashboard import Dashboard
+from superset.extensions import event_logger
from superset.mcp_service.dashboard.schemas import (
DashboardFilter,
DashboardInfo,
@@ -123,15 +124,16 @@ async def list_dashboards(
logger=logger,
)
- result = tool.run_tool(
- filters=request.filters,
- search=request.search,
- select_columns=request.select_columns,
- order_column=request.order_column,
- order_direction=request.order_direction,
- page=max(request.page - 1, 0),
- page_size=request.page_size,
- )
+ with event_logger.log_context(action="mcp.list_dashboards.query"):
+ result = tool.run_tool(
+ filters=request.filters,
+ search=request.search,
+ select_columns=request.select_columns,
+ order_column=request.order_column,
+ order_direction=request.order_direction,
+ page=max(request.page - 1, 0),
+ page_size=request.page_size,
+ )
count = len(result.dashboards) if hasattr(result, "dashboards") else 0
total_pages = getattr(result, "total_pages", None)
await ctx.info(
@@ -147,4 +149,7 @@ async def list_dashboards(
"Applying field filtering via serialization context: columns=%s"
% (columns_to_filter,)
)
- return result.model_dump(mode="json", context={"select_columns":
columns_to_filter})
+ with event_logger.log_context(action="mcp.list_dashboards.serialization"):
+ return result.model_dump(
+ mode="json", context={"select_columns": columns_to_filter}
+ )
diff --git a/superset/mcp_service/dataset/tool/get_dataset_info.py
b/superset/mcp_service/dataset/tool/get_dataset_info.py
index b213a6fb781..e9e8817d2d9 100644
--- a/superset/mcp_service/dataset/tool/get_dataset_info.py
+++ b/superset/mcp_service/dataset/tool/get_dataset_info.py
@@ -28,6 +28,7 @@ from datetime import datetime, timezone
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.dataset.schemas import (
DatasetError,
DatasetInfo,
@@ -83,16 +84,17 @@ async def get_dataset_info(
try:
from superset.daos.dataset import DatasetDAO
- tool = ModelGetInfoCore(
- dao_class=DatasetDAO,
- output_schema=DatasetInfo,
- error_schema=DatasetError,
- serializer=serialize_dataset_object,
- supports_slug=False, # Datasets don't have slugs
- logger=logger,
- )
+ with event_logger.log_context(action="mcp.get_dataset_info.lookup"):
+ tool = ModelGetInfoCore(
+ dao_class=DatasetDAO,
+ output_schema=DatasetInfo,
+ error_schema=DatasetError,
+ serializer=serialize_dataset_object,
+ supports_slug=False, # Datasets don't have slugs
+ logger=logger,
+ )
- result = tool.run_tool(request.identifier)
+ result = tool.run_tool(request.identifier)
if isinstance(result, DatasetInfo):
await ctx.info(
diff --git a/superset/mcp_service/dataset/tool/list_datasets.py
b/superset/mcp_service/dataset/tool/list_datasets.py
index 4d81f13eb03..f33d455636d 100644
--- a/superset/mcp_service/dataset/tool/list_datasets.py
+++ b/superset/mcp_service/dataset/tool/list_datasets.py
@@ -31,6 +31,7 @@ from superset_core.mcp import tool
if TYPE_CHECKING:
from superset.connectors.sqla.models import SqlaTable
+from superset.extensions import event_logger
from superset.mcp_service.dataset.schemas import (
DatasetFilter,
DatasetInfo,
@@ -129,15 +130,16 @@ async def list_datasets(request: ListDatasetsRequest,
ctx: Context) -> DatasetLi
logger=logger,
)
- result = tool.run_tool(
- filters=request.filters,
- search=request.search,
- select_columns=request.select_columns,
- order_column=request.order_column,
- order_direction=request.order_direction,
- page=max(request.page - 1, 0),
- page_size=request.page_size,
- )
+ with event_logger.log_context(action="mcp.list_datasets.query"):
+ result = tool.run_tool(
+ filters=request.filters,
+ search=request.search,
+ select_columns=request.select_columns,
+ order_column=request.order_column,
+ order_direction=request.order_direction,
+ page=max(request.page - 1, 0),
+ page_size=request.page_size,
+ )
await ctx.info(
"Datasets listed successfully: count=%s, total_count=%s,
total_pages=%s"
@@ -156,9 +158,11 @@ async def list_datasets(request: ListDatasetsRequest, ctx:
Context) -> DatasetLi
"Applying field filtering via serialization context: columns=%s"
% (columns_to_filter,)
)
- return result.model_dump(
- mode="json", context={"select_columns": columns_to_filter}
- )
+ with
event_logger.log_context(action="mcp.list_datasets.serialization"):
+ return result.model_dump(
+ mode="json",
+ context={"select_columns": columns_to_filter},
+ )
except Exception as e:
await ctx.error(
diff --git a/superset/mcp_service/explore/tool/generate_explore_link.py
b/superset/mcp_service/explore/tool/generate_explore_link.py
index d1f07daf929..3048a538c6c 100644
--- a/superset/mcp_service/explore/tool/generate_explore_link.py
+++ b/superset/mcp_service/explore/tool/generate_explore_link.py
@@ -28,6 +28,7 @@ from urllib.parse import parse_qs, urlparse
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.chart.chart_utils import (
generate_explore_link as generate_url,
map_config_to_form_data,
@@ -91,11 +92,11 @@ async def generate_explore_link(
try:
await ctx.report_progress(1, 3, "Converting configuration to form
data")
- # Map config to form_data using shared utilities
- # Pass dataset_id to enable column type checking for proper viz_type
selection
- form_data = map_config_to_form_data(
- request.config, dataset_id=request.dataset_id
- )
+ with
event_logger.log_context(action="mcp.generate_explore_link.form_data"):
+ # Map config to form_data using shared utilities
+ form_data = map_config_to_form_data(
+ request.config, dataset_id=request.dataset_id
+ )
# Add datasource to form_data for consistency with generate_chart
# Only set if not already present to avoid overwriting
@@ -112,8 +113,13 @@ async def generate_explore_link(
)
await ctx.report_progress(2, 3, "Generating explore URL")
- # Generate explore link using shared utilities
- explore_url = generate_url(dataset_id=request.dataset_id,
form_data=form_data)
+ with event_logger.log_context(
+ action="mcp.generate_explore_link.url_generation"
+ ):
+ # Generate explore link using shared utilities
+ explore_url = generate_url(
+ dataset_id=request.dataset_id, form_data=form_data
+ )
# Extract form_data_key from the explore URL using proper URL parsing
form_data_key = None
diff --git a/superset/mcp_service/middleware.py
b/superset/mcp_service/middleware.py
index 29c64275b58..7b3508c76a5 100644
--- a/superset/mcp_service/middleware.py
+++ b/superset/mcp_service/middleware.py
@@ -87,20 +87,47 @@ def _sanitize_error_for_logging(error: Exception) -> str:
return error_str
+_SENSITIVE_PARAM_KEYS = frozenset(
+ {
+ "password",
+ "token",
+ "api_key",
+ "secret",
+ "credentials",
+ "authorization",
+ "cookie",
+ }
+)
+
+
+def _sanitize_params(params: dict[str, Any]) -> dict[str, Any]:
+ """Remove sensitive fields from params before logging."""
+ if not isinstance(params, dict):
+ return params
+ return {
+ k: "[REDACTED]" if k.lower() in _SENSITIVE_PARAM_KEYS else v
+ for k, v in params.items()
+ }
+
+
class LoggingMiddleware(Middleware):
"""
Middleware that logs every MCP message (request and response) using the
event logger. This matches the core audit log system (Action Log UI,
logs table, custom loggers). Also attempts to log dashboard_id, chart_id
(slice_id), and dataset_id if present in tool params.
+
+ Tool calls are handled in on_call_tool() which wraps execution to capture
+ duration_ms. Non-tool messages (resource reads, prompts, etc.) are handled
+ in on_message().
"""
- async def on_message(
- self,
- context: MiddlewareContext,
- call_next: Callable[[MiddlewareContext], Awaitable[Any]],
- ) -> Any:
- # Extract agent_id and user_id
+ def _extract_context_info(
+ self, context: MiddlewareContext
+ ) -> tuple[
+ str | None, int | None, int | None, int | None, int | None, dict[str,
Any]
+ ]:
+ """Extract agent_id, user_id, and entity IDs from context."""
agent_id = None
user_id = None
dashboard_id = None
@@ -113,18 +140,78 @@ class LoggingMiddleware(Middleware):
agent_id = getattr(context.session, "agent_id", None)
try:
user_id = get_user_id()
- except Exception:
+ except (RuntimeError, AttributeError):
user_id = None
- # Try to extract IDs from params
if isinstance(params, dict):
dashboard_id = params.get("dashboard_id")
- # Chart ID may be under 'chart_id' or 'slice_id'
slice_id = params.get("chart_id") or params.get("slice_id")
dataset_id = params.get("dataset_id")
- # Log to Superset's event logger (DB, Action Log UI, or custom)
+ return agent_id, user_id, dashboard_id, slice_id, dataset_id, params
+
+ async def on_call_tool(
+ self,
+ context: MiddlewareContext,
+ call_next: Callable[[MiddlewareContext], Awaitable[Any]],
+ ) -> Any:
+ """Log tool calls with duration tracking."""
+ agent_id, user_id, dashboard_id, slice_id, dataset_id, params = (
+ self._extract_context_info(context)
+ )
+ tool_name = getattr(context.message, "name", None)
+
+ start_time = time.time()
+ success = False
+ try:
+ result = await call_next(context)
+ success = True
+ return result
+ finally:
+ duration_ms = int((time.time() - start_time) * 1000)
+ event_logger.log(
+ user_id=user_id,
+ action="mcp_tool_call",
+ dashboard_id=dashboard_id,
+ duration_ms=duration_ms,
+ slice_id=slice_id,
+ referrer=None,
+ curated_payload={
+ "tool": tool_name,
+ "agent_id": agent_id,
+ "params": _sanitize_params(params),
+ "method": context.method,
+ "dashboard_id": dashboard_id,
+ "slice_id": slice_id,
+ "dataset_id": dataset_id,
+ "success": success,
+ },
+ )
+ logger.info(
+ "MCP tool call: tool=%s, agent_id=%s, user_id=%s, method=%s, "
+ "dashboard_id=%s, slice_id=%s, dataset_id=%s, duration_ms=%s, "
+ "success=%s",
+ tool_name,
+ agent_id,
+ user_id,
+ context.method,
+ dashboard_id,
+ slice_id,
+ dataset_id,
+ duration_ms,
+ success,
+ )
+
+ async def on_message(
+ self,
+ context: MiddlewareContext,
+ call_next: Callable[[MiddlewareContext], Awaitable[Any]],
+ ) -> Any:
+ """Log non-tool messages (resource reads, prompts, etc.)."""
+ agent_id, user_id, dashboard_id, slice_id, dataset_id, params = (
+ self._extract_context_info(context)
+ )
event_logger.log(
user_id=user_id,
- action="mcp_tool_call",
+ action="mcp_message",
dashboard_id=dashboard_id,
duration_ms=None,
slice_id=slice_id,
@@ -132,24 +219,19 @@ class LoggingMiddleware(Middleware):
curated_payload={
"tool": getattr(context.message, "name", None),
"agent_id": agent_id,
- "params": params,
+ "params": _sanitize_params(params),
"method": context.method,
"dashboard_id": dashboard_id,
"slice_id": slice_id,
"dataset_id": dataset_id,
},
)
- # (Optional) also log to standard logger for debugging
logger.info(
- "MCP tool call: tool=%s, agent_id=%s, user_id=%s, method=%s, "
- "dashboard_id=%s, slice_id=%s, dataset_id=%s",
+ "MCP message: tool=%s, agent_id=%s, user_id=%s, method=%s",
getattr(context.message, "name", None),
agent_id,
user_id,
context.method,
- dashboard_id,
- slice_id,
- dataset_id,
)
return await call_next(context)
diff --git a/superset/mcp_service/sql_lab/tool/execute_sql.py
b/superset/mcp_service/sql_lab/tool/execute_sql.py
index 64fe24a395a..fee996c97c4 100644
--- a/superset/mcp_service/sql_lab/tool/execute_sql.py
+++ b/superset/mcp_service/sql_lab/tool/execute_sql.py
@@ -33,6 +33,7 @@ from superset_core.mcp import tool
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import SupersetErrorException,
SupersetSecurityException
+from superset.extensions import event_logger
from superset.mcp_service.sql_lab.schemas import (
ColumnInfo,
ExecuteSqlRequest,
@@ -72,28 +73,29 @@ async def execute_sql(request: ExecuteSqlRequest, ctx:
Context) -> ExecuteSqlRes
from superset.models.core import Database
# 1. Get database and check access
- database =
db.session.query(Database).filter_by(id=request.database_id).first()
- if not database:
- raise SupersetErrorException(
- SupersetError(
- message=f"Database with ID {request.database_id} not
found",
- error_type=SupersetErrorType.DATABASE_NOT_FOUND_ERROR,
- level=ErrorLevel.ERROR,
- )
+ with event_logger.log_context(action="mcp.execute_sql.db_validation"):
+ database = (
+
db.session.query(Database).filter_by(id=request.database_id).first()
)
+ if not database:
+ raise SupersetErrorException(
+ SupersetError(
+ message=f"Database with ID {request.database_id} not
found",
+ error_type=SupersetErrorType.DATABASE_NOT_FOUND_ERROR,
+ level=ErrorLevel.ERROR,
+ )
+ )
- if not security_manager.can_access_database(database):
- raise SupersetSecurityException(
- SupersetError(
- message=f"Access denied to database
{database.database_name}",
-
error_type=SupersetErrorType.DATABASE_SECURITY_ACCESS_ERROR,
- level=ErrorLevel.ERROR,
+ if not security_manager.can_access_database(database):
+ raise SupersetSecurityException(
+ SupersetError(
+ message=(f"Access denied to database
{database.database_name}"),
+
error_type=SupersetErrorType.DATABASE_SECURITY_ACCESS_ERROR,
+ level=ErrorLevel.ERROR,
+ )
)
- )
- # 2. Build QueryOptions
- # Caching is enabled by default to reduce database load.
- # force_refresh bypasses cache when user explicitly requests fresh
data.
+ # 2. Build QueryOptions and execute query
cache_opts = CacheOptions(force_refresh=True) if request.force_refresh
else None
options = QueryOptions(
catalog=request.catalog,
@@ -106,10 +108,12 @@ async def execute_sql(request: ExecuteSqlRequest, ctx:
Context) -> ExecuteSqlRes
)
# 3. Execute query
- result = database.execute(request.sql, options)
+ with
event_logger.log_context(action="mcp.execute_sql.query_execution"):
+ result = database.execute(request.sql, options)
# 4. Convert to MCP response format
- response = _convert_to_response(result)
+ with
event_logger.log_context(action="mcp.execute_sql.response_conversion"):
+ response = _convert_to_response(result)
# Log successful execution
if response.success:
diff --git a/superset/mcp_service/sql_lab/tool/open_sql_lab_with_context.py
b/superset/mcp_service/sql_lab/tool/open_sql_lab_with_context.py
index fd792e58b3b..e702994c22b 100644
--- a/superset/mcp_service/sql_lab/tool/open_sql_lab_with_context.py
+++ b/superset/mcp_service/sql_lab/tool/open_sql_lab_with_context.py
@@ -27,6 +27,7 @@ from urllib.parse import urlencode
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.sql_lab.schemas import (
OpenSqlLabRequest,
SqlLabResponse,
@@ -48,8 +49,9 @@ def open_sql_lab_with_context(
try:
from superset.daos.database import DatabaseDAO
- # Validate database exists and is accessible
- database = DatabaseDAO.find_by_id(request.database_connection_id)
+ with event_logger.log_context(action="mcp.open_sql_lab.db_validation"):
+ # Validate database exists and is accessible
+ database = DatabaseDAO.find_by_id(request.database_connection_id)
if not database:
return SqlLabResponse(
url="",
diff --git a/superset/mcp_service/system/tool/get_instance_info.py
b/superset/mcp_service/system/tool/get_instance_info.py
index 9cb49768bee..7c142acabfe 100644
--- a/superset/mcp_service/system/tool/get_instance_info.py
+++ b/superset/mcp_service/system/tool/get_instance_info.py
@@ -25,6 +25,7 @@ import logging
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.mcp_core import InstanceInfoCore
from superset.mcp_service.system.schemas import (
GetSupersetInstanceInfoRequest,
@@ -98,7 +99,8 @@ def get_instance_info(
}
# Run the configurable core
- return _instance_info_core.run_tool()
+ with event_logger.log_context(action="mcp.get_instance_info.metrics"):
+ return _instance_info_core.run_tool()
except Exception as e:
error_msg = f"Unexpected error in instance info: {str(e)}"
diff --git a/superset/mcp_service/system/tool/get_schema.py
b/superset/mcp_service/system/tool/get_schema.py
index eac47be3a80..c5c25f0e387 100644
--- a/superset/mcp_service/system/tool/get_schema.py
+++ b/superset/mcp_service/system/tool/get_schema.py
@@ -29,6 +29,7 @@ from typing import Callable, Literal
from fastmcp import Context
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.common.schema_discovery import (
CHART_DEFAULT_COLUMNS,
CHART_SEARCH_COLUMNS,
@@ -154,8 +155,9 @@ async def get_schema(request: GetSchemaRequest, ctx:
Context) -> GetSchemaRespon
)
# Create core instance and run (columns extracted dynamically)
- core = factory()
- schema_info = core.run_tool()
+ with event_logger.log_context(action="mcp.get_schema.discovery"):
+ core = factory()
+ schema_info = core.run_tool()
await ctx.debug(
f"Schema for {request.model_type}: "
diff --git a/superset/mcp_service/system/tool/health_check.py
b/superset/mcp_service/system/tool/health_check.py
index d6032d9b9ae..0283438e6ff 100644
--- a/superset/mcp_service/system/tool/health_check.py
+++ b/superset/mcp_service/system/tool/health_check.py
@@ -24,6 +24,7 @@ import platform
from flask import current_app
from superset_core.mcp import tool
+from superset.extensions import event_logger
from superset.mcp_service.system.schemas import HealthCheckResponse
from superset.utils.version import get_version_metadata
@@ -64,9 +65,10 @@ async def health_check() -> HealthCheckResponse:
service_name = f"{app_name} MCP Service"
try:
- # Get version from Superset version metadata
- version_metadata = get_version_metadata()
- version = version_metadata.get("version_string", "unknown")
+ with event_logger.log_context(action="mcp.health_check.status"):
+ # Get version from Superset version metadata
+ version_metadata = get_version_metadata()
+ version = version_metadata.get("version_string", "unknown")
response = HealthCheckResponse(
status="healthy",
diff --git a/tests/unit_tests/extensions/test_types.py
b/tests/unit_tests/extensions/test_types.py
index 4cf62587043..dea22ee19ce 100644
--- a/tests/unit_tests/extensions/test_types.py
+++ b/tests/unit_tests/extensions/test_types.py
@@ -67,10 +67,13 @@ def test_extension_config_full():
"views": {
"sqllab": {
"panels": [
- {"id": "query_insights.main", "name": "Query
Insights"}
- ],
- },
- },
+ {
+ "id": "query_insights.main",
+ "name": "Query Insights",
+ }
+ ]
+ }
+ }
},
"moduleFederation": {"exposes": ["./index"]},
},
diff --git a/tests/unit_tests/mcp_service/test_middleware_logging.py
b/tests/unit_tests/mcp_service/test_middleware_logging.py
new file mode 100644
index 00000000000..1f48d531ccc
--- /dev/null
+++ b/tests/unit_tests/mcp_service/test_middleware_logging.py
@@ -0,0 +1,207 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit tests for LoggingMiddleware on_call_tool() and on_message() methods.
+
+Tests verify that:
+- on_call_tool() captures duration_ms and success status
+- on_message() logs non-tool messages without duration
+- _extract_context_info() extracts entity IDs from params
+"""
+
+from typing import Any
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from superset.mcp_service.middleware import LoggingMiddleware
+
+
+def _make_context(
+ method: str = "tools/call",
+ name: str = "list_charts",
+ params: dict[str, Any] | None = None,
+ metadata: dict[str, Any] | None = None,
+):
+ """Create a mock MiddlewareContext."""
+ ctx = MagicMock()
+ ctx.method = method
+ message = MagicMock()
+ message.name = name
+ message.params = params or {}
+ ctx.message = message
+ if metadata is not None:
+ ctx.metadata = metadata
+ else:
+ ctx.metadata = None
+ ctx.session = None
+ return ctx
+
+
class TestLoggingMiddlewareOnCallTool:
    """Tests for LoggingMiddleware.on_call_tool()."""

    @patch("superset.mcp_service.middleware.event_logger")
    @patch("superset.mcp_service.middleware.get_user_id", return_value=42)
    @pytest.mark.asyncio
    async def test_on_call_tool_logs_duration_and_success(
        self, mock_get_user_id, mock_event_logger
    ):
        """A successful tool call is logged with duration_ms and success=True."""
        mw = LoggingMiddleware()
        context = _make_context(name="list_charts")
        downstream = AsyncMock(return_value="tool_result")

        outcome = await mw.on_call_tool(context, downstream)

        assert outcome == "tool_result"
        downstream.assert_awaited_once_with(context)

        # Exactly one structured log entry must reach the event logger.
        mock_event_logger.log.assert_called_once()
        logged = mock_event_logger.log.call_args[1]
        assert logged["action"] == "mcp_tool_call"
        assert logged["user_id"] == 42
        assert isinstance(logged["duration_ms"], int)
        assert logged["duration_ms"] >= 0
        assert logged["curated_payload"]["tool"] == "list_charts"
        assert logged["curated_payload"]["success"] is True

    @patch("superset.mcp_service.middleware.event_logger")
    @patch("superset.mcp_service.middleware.get_user_id", return_value=42)
    @pytest.mark.asyncio
    async def test_on_call_tool_logs_failure_on_exception(
        self, mock_get_user_id, mock_event_logger
    ):
        """A raising tool is still logged, with success=False."""
        mw = LoggingMiddleware()
        context = _make_context(name="execute_sql")
        downstream = AsyncMock(side_effect=ValueError("boom"))

        with pytest.raises(ValueError, match="boom"):
            await mw.on_call_tool(context, downstream)

        # The finally block must emit the log even on failure.
        mock_event_logger.log.assert_called_once()
        logged = mock_event_logger.log.call_args[1]
        assert logged["duration_ms"] >= 0
        assert logged["curated_payload"]["success"] is False

    @patch("superset.mcp_service.middleware.event_logger")
    @patch("superset.mcp_service.middleware.get_user_id", return_value=42)
    @pytest.mark.asyncio
    async def test_on_call_tool_extracts_entity_ids(
        self, mock_get_user_id, mock_event_logger
    ):
        """dashboard_id, chart_id and dataset_id params end up in the log."""
        mw = LoggingMiddleware()
        entity_params = {
            "dashboard_id": 10,
            "chart_id": 20,
            "dataset_id": 30,
        }
        context = _make_context(name="get_chart_info", params=entity_params)
        downstream = AsyncMock(return_value="ok")

        await mw.on_call_tool(context, downstream)

        logged = mock_event_logger.log.call_args[1]
        assert logged["dashboard_id"] == 10
        # chart_id is logged under Superset's legacy "slice_id" name.
        assert logged["slice_id"] == 20
        assert logged["curated_payload"]["dataset_id"] == 30
+
+
class TestLoggingMiddlewareOnMessage:
    """Tests for LoggingMiddleware.on_message()."""

    @patch("superset.mcp_service.middleware.event_logger")
    @patch("superset.mcp_service.middleware.get_user_id", return_value=1)
    @pytest.mark.asyncio
    async def test_on_message_logs_without_duration(
        self, mock_get_user_id, mock_event_logger
    ):
        """Non-tool messages log action=mcp_message with duration_ms=None."""
        mw = LoggingMiddleware()
        context = _make_context(method="resources/read", name="instance/metadata")
        downstream = AsyncMock(return_value="resource_data")

        outcome = await mw.on_message(context, downstream)

        assert outcome == "resource_data"
        downstream.assert_awaited_once_with(context)

        mock_event_logger.log.assert_called_once()
        logged = mock_event_logger.log.call_args[1]
        assert logged["action"] == "mcp_message"
        assert logged["duration_ms"] is None
        # Unlike tool calls, plain messages carry no success flag.
        assert "success" not in logged["curated_payload"]
+
+
class TestExtractContextInfo:
    """Tests for LoggingMiddleware._extract_context_info()."""

    @patch("superset.mcp_service.middleware.get_user_id", return_value=99)
    def test_extract_with_metadata_agent_id(self, mock_get_user_id):
        """agent_id is read from context.metadata."""
        mw = LoggingMiddleware()
        context = _make_context(metadata={"agent_id": "agent-123"})

        extracted = mw._extract_context_info(context)

        # Tuple layout: (agent_id, user_id, dashboard_id, slice_id,
        # dataset_id, params)
        assert extracted[0] == "agent-123"
        assert extracted[1] == 99

    @patch(
        "superset.mcp_service.middleware.get_user_id",
        side_effect=RuntimeError("no Flask request context"),
    )
    def test_extract_handles_missing_user(self, mock_get_user_id):
        """A failing get_user_id() degrades gracefully to user_id=None."""
        mw = LoggingMiddleware()
        context = _make_context()

        extracted = mw._extract_context_info(context)

        assert extracted[1] is None

    @patch("superset.mcp_service.middleware.get_user_id", return_value=1)
    def test_extract_slice_id_from_chart_id(self, mock_get_user_id):
        """slice_id is populated from the chart_id param alias."""
        mw = LoggingMiddleware()
        context = _make_context(params={"chart_id": 55})

        extracted = mw._extract_context_info(context)

        assert extracted[3] == 55

    @patch("superset.mcp_service.middleware.get_user_id", return_value=1)
    def test_extract_slice_id_from_slice_id(self, mock_get_user_id):
        """slice_id falls back to an explicit slice_id param."""
        mw = LoggingMiddleware()
        context = _make_context(params={"slice_id": 66})

        extracted = mw._extract_context_info(context)

        assert extracted[3] == 66