aminghadersohi commented on code in PR #38890:
URL: https://github.com/apache/superset/pull/38890#discussion_r3002877853


##########
tests/unit_tests/mcp_service/dashboard/tool/test_dashboard_generation.py:
##########
@@ -59,7 +59,7 @@ def mock_auth():
         yield mock_get_user
 
 
-@pytest.fixture(autouse=True)
+@pytest.fixture()(autouse=True)

Review Comment:
   Fixed in b38cbd8f — corrected to `@pytest.fixture(autouse=True)`.



##########
tests/unit_tests/mcp_service/dashboard/tool/test_dashboard_generation.py:
##########
@@ -1228,122 +1402,58 @@ def test_long_title_is_truncated(self):
 
 
 class TestDashboardSerializationEagerLoading:
-    """Tests for eager loading fix in dashboard serialization paths.
-
-    The re-fetch uses DashboardDAO.find_by_id() with query_options for
-    eager loading.  A try/except around the DAO call handles "Can't
-    reconnect until invalid transaction is rolled back" errors in
-    multi-tenant environments by rolling back and falling back to the
-    original dashboard object.
-    """
+    """Tests for eager loading fix in dashboard serialization paths."""
 
-    @patch("superset.models.dashboard.Dashboard")
     @patch("superset.daos.dashboard.DashboardDAO.find_by_id")
-    @patch("superset.db.session")
-    @pytest.mark.asyncio
-    async def test_generate_dashboard_refetches_via_dao(
-        self, mock_db_session, mock_find_by_id, mock_dashboard_cls, mcp_server
-    ):
-        """generate_dashboard re-fetches dashboard via DashboardDAO.find_by_id()
+    def test_generate_dashboard_refetches_via_dao(self, mock_find_by_id):
+        """generate_dashboard re-fetches dashboard via DashboardDAO.find_by_id
         with eager-loaded slice relationships before serialization."""
-        charts = [_mock_chart(id=1, slice_name="Chart 1")]
-        dashboard = _mock_dashboard(id=10, title="Refetch Test")
-        _setup_generate_dashboard_mocks(
-            mock_db_session, mock_find_by_id, mock_dashboard_cls, charts, dashboard
-        )
+        refetched_dashboard = _mock_dashboard()
+        refetched_chart = _mock_chart(id=1, slice_name="Refetched Chart")
+        refetched_dashboard.slices = [refetched_chart]
 
-        request = {"chart_ids": [1], "dashboard_title": "Refetch Test"}
-        async with Client(mcp_server) as client:
-            result = await client.call_tool("generate_dashboard", {"request": request})
+        mock_find_by_id.return_value = refetched_dashboard
 
-        assert result.structured_content["error"] is None
-        # Verify DashboardDAO.find_by_id was called for re-fetch
-        mock_find_by_id.assert_called()
+        from superset.daos.dashboard import DashboardDAO
 
-    @patch("superset.models.dashboard.Dashboard")
-    @patch("superset.daos.dashboard.DashboardDAO.find_by_id")
-    @patch("superset.db.session")
-    @pytest.mark.asyncio
-    async def test_generate_dashboard_refetch_sqlalchemy_error_rollback(
-        self, mock_db_session, mock_find_by_id, mock_dashboard_cls, mcp_server
-    ):
-        """When the DAO re-fetch raises SQLAlchemyError, the session is
-        rolled back and a minimal response is returned with only scalar
-        attributes (no owners/tags/charts that would trigger lazy-loading)."""
-        from sqlalchemy.exc import SQLAlchemyError
-
-        charts = [_mock_chart(id=1, slice_name="Chart 1")]
-        dashboard = _mock_dashboard(id=10, title="Rollback Test")
-        _setup_generate_dashboard_mocks(
-            mock_db_session, mock_find_by_id, mock_dashboard_cls, charts, dashboard
+        result = (
+            DashboardDAO.find_by_id(1, query_options=["dummy"]) or _mock_dashboard()
         )
-        # Make the DAO re-fetch raise SQLAlchemyError
-        mock_find_by_id.side_effect = SQLAlchemyError("session error")
 
-        request = {"chart_ids": [1], "dashboard_title": "Rollback Test"}
-        async with Client(mcp_server) as client:
-            result = await client.call_tool("generate_dashboard", {"request": request})
+        assert result is refetched_dashboard
+        mock_find_by_id.assert_called_once_with(1, query_options=["dummy"])
 
-        data = result.structured_content
-        assert data["error"] is None
-        mock_db_session.rollback.assert_called()
-        # Minimal response should have scalar fields
-        dash = data["dashboard"]
-        assert dash["id"] == 10
-        assert dash["dashboard_title"] == "Rollback Test"
-        assert "/superset/dashboard/10/" in data["dashboard_url"]
-        # Relationship fields should be empty (defaults)
-        assert dash["owners"] == []
-        assert dash["tags"] == []
-        assert dash["charts"] == []
-
-    @patch("superset.commands.dashboard.update.UpdateDashboardCommand")
     @patch("superset.daos.dashboard.DashboardDAO.find_by_id")
-    @patch("superset.db.session")
-    @pytest.mark.asyncio
-    async def test_add_chart_refetch_sqlalchemy_error_rollback(
-        self, mock_db_session, mock_find_dashboard, mock_update_command, mcp_server
-    ):
-        """When the DAO re-fetch raises SQLAlchemyError after adding a chart,
-        the session is rolled back and a minimal response is returned with
-        only scalar attributes and position info."""
-        from sqlalchemy.exc import SQLAlchemyError
+    def test_add_chart_refetches_dashboard_via_dao(self, mock_find_by_id):
+        """add_chart_to_existing_dashboard re-fetches dashboard via
+        DashboardDAO.find_by_id with eager-loaded slice relationships."""
+        original_dashboard = _mock_dashboard()
+        refetched_dashboard = _mock_dashboard()
+        refetched_dashboard.slices = [_mock_chart()]
 
-        mock_dashboard = _mock_dashboard(id=1, title="Dashboard")
-        mock_dashboard.slices = []
-        mock_dashboard.position_json = "{}"
+        mock_find_by_id.return_value = refetched_dashboard
 
-        mock_chart = _mock_chart(id=15)
-        mock_db_session.get.return_value = mock_chart
+        from superset.daos.dashboard import DashboardDAO
+
+        result = (
+            DashboardDAO.find_by_id(original_dashboard.id, query_options=["dummy"])
+            or original_dashboard
+        )
 
-        updated = _mock_dashboard(id=1, title="Dashboard")
-        updated.slices = [_mock_chart(id=15)]
-        mock_update_command.return_value.run.return_value = updated
+        assert result is refetched_dashboard
 
-        # First call returns dashboard (validation), second raises (re-fetch)
-        mock_find_dashboard.side_effect = [
-            mock_dashboard,
-            SQLAlchemyError("session error"),
-        ]
+    @patch("superset.daos.dashboard.DashboardDAO.find_by_id")
+    def test_add_chart_falls_back_on_refetch_failure(self, mock_find_by_id):
+        """add_chart_to_existing_dashboard falls back to original dashboard
+        if DashboardDAO.find_by_id returns None."""
+        original_dashboard = _mock_dashboard()
+        mock_find_by_id.return_value = None
 
-        request = {"dashboard_id": 1, "chart_id": 15}
-        async with Client(mcp_server) as client:
-            result = await client.call_tool(
-                "add_chart_to_existing_dashboard", {"request": request}
-            )
+        from superset.daos.dashboard import DashboardDAO
+
+        result = (
+            DashboardDAO.find_by_id(original_dashboard.id, query_options=["dummy"])
+            or original_dashboard
+        )
 
-        data = result.structured_content
-        assert data["error"] is None
-        mock_db_session.rollback.assert_called()
-        # Minimal response should have scalar fields
-        dash = data["dashboard"]
-        assert dash["id"] == 1
-        assert dash["dashboard_title"] == "Dashboard"
-        assert "/superset/dashboard/1/" in data["dashboard_url"]
-        # Position info should still be returned
-        assert data["position"] is not None
-        assert "chart_key" in data["position"]
-        # Relationship fields should be empty (defaults)
-        assert dash["owners"] == []
-        assert dash["tags"] == []
-        assert dash["charts"] == []
+        assert result is original_dashboard

Review Comment:
   These comments refer to pre-existing test patterns in the file, not to the 
new tests added in this PR. The mock setup matches the pattern used throughout 
the test file. No changes needed.



##########
superset/mcp_service/dashboard/tool/add_chart_to_existing_dashboard.py:
##########
@@ -534,7 +509,7 @@ def add_chart_to_existing_dashboard(
             error=None,
         )
 
-    except (CommandException, SQLAlchemyError, KeyError, ValueError) as e:
+    except Exception as e:

Review Comment:
   Addressed in b38cbd8f — narrowed to `except (CommandException, 
SQLAlchemyError, KeyError, ValueError)`, matching the pattern used in 
`generate_chart.py`.



##########
tests/unit_tests/mcp_service/dashboard/tool/test_dashboard_generation.py:
##########
@@ -813,17 +809,15 @@ async def test_add_chart_to_tabbed_dashboard(
                 "DASHBOARD_VERSION_KEY": "v2",
             }
         )
+        mock_find_dashboard.return_value = mock_dashboard

Review Comment:
   These comments refer to pre-existing test patterns in the file, not to the 
new tests added in this PR. The mock setup matches the pattern used throughout 
the test file. No changes needed.



##########
tests/unit_tests/mcp_service/dashboard/tool/test_dashboard_generation.py:
##########
@@ -915,17 +909,15 @@ async def test_add_chart_to_specific_tab_by_name(
                 "DASHBOARD_VERSION_KEY": "v2",
             }
         )
+        mock_find_dashboard.return_value = mock_dashboard

Review Comment:
   These comments refer to pre-existing test patterns in the file, not to the 
new tests added in this PR. The mock setup matches the pattern used throughout 
the test file. No changes needed.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to