This is an automated email from the ASF dual-hosted git repository.

maximebeauchemin pushed a commit to branch fix-12066-timeseries-groupby
in repository https://gitbox.apache.org/repos/asf/superset.git

commit 72687640179693dd74e0242c619c2b9632cd35a3
Author: Maxime Beauchemin <[email protected]>
AuthorDate: Wed Feb 11 02:34:09 2026 +0000

    fix(viz): flatten MultiIndex columns in Time-Series Table for multiple Group By (#12066)
    
    When using multiple "Group By" columns in a Time-Series Table, pivot_table()
    produces a MultiIndex on the DataFrame columns. to_dict(orient="index") then
    returns records whose inner keys are tuples, and JSON serialization of those
    records fails with "TypeError: keys must be str, int, float, bool or None,
    not tuple".
    
    Flatten MultiIndex columns to comma-separated strings before serialization
    so the result is valid JSON and compatible with the frontend.
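    
    Continuing the illustrative snippet above, the flattening applied by this
    commit turns those tuple keys into plain strings:
    
        if isinstance(pt.columns, pd.MultiIndex):
            pt.columns = [", ".join(str(s) for s in col) for col in pt.columns]
        json.dumps(pt.to_dict(orient="index"))  # keys are now "a1, b1", "a2, b1"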
    
    Co-Authored-By: Claude Opus 4 <[email protected]>
---
 superset/viz.py                      |  2 ++
 tests/integration_tests/viz_tests.py | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)

diff --git a/superset/viz.py b/superset/viz.py
index c6c7cf699b7..1ff7ca9ddd0 100644
--- a/superset/viz.py
+++ b/superset/viz.py
@@ -747,6 +747,8 @@ class TimeTableViz(BaseViz):
         pt = df.pivot_table(index=DTTM_ALIAS, columns=columns, values=values)
         pt.index = pt.index.map(str)
         pt = pt.sort_index()
+        if isinstance(pt.columns, pd.MultiIndex):
+            pt.columns = [", ".join(str(s) for s in col) for col in pt.columns]
         return {
             "records": pt.to_dict(orient="index"),
             "columns": list(pt.columns),
diff --git a/tests/integration_tests/viz_tests.py b/tests/integration_tests/viz_tests.py
index 7ebcf1197fc..a3dcca8c0ba 100644
--- a/tests/integration_tests/viz_tests.py
+++ b/tests/integration_tests/viz_tests.py
@@ -626,6 +626,39 @@ class TestTimeSeriesTableViz(SupersetTestCase):
         }
         assert expected == data["records"]
 
+    def test_get_data_multiple_group_by(self):
+        form_data = {"metrics": ["sum__A"], "groupby": ["groupby1", "groupby2"]}
+        datasource = self.get_datasource_mock()
+        raw = {}
+        t1 = pd.Timestamp("2000")
+        t2 = pd.Timestamp("2002")
+        raw[DTTM_ALIAS] = [t1, t1, t1, t1, t2, t2, t2, t2]
+        raw["sum__A"] = [15, 20, 25, 30, 35, 40, 45, 50]
+        raw["groupby1"] = ["a1", "a2", "a1", "a2", "a1", "a2", "a1", "a2"]
+        raw["groupby2"] = ["b1", "b1", "b2", "b2", "b1", "b1", "b2", "b2"]
+        df = pd.DataFrame(raw)
+        test_viz = viz.TimeTableViz(datasource, form_data)
+        data = test_viz.get_data(df)
+        # Columns should be flattened strings, not tuples
+        assert {"a1, b1", "a1, b2", "a2, b1", "a2, b2"} == set(data["columns"])
+        time_format = "%Y-%m-%d %H:%M:%S"
+        expected = {
+            t1.strftime(time_format): {
+                "a1, b1": 15,
+                "a1, b2": 25,
+                "a2, b1": 20,
+                "a2, b2": 30,
+            },
+            t2.strftime(time_format): {
+                "a1, b1": 35,
+                "a1, b2": 45,
+                "a2, b1": 40,
+                "a2, b2": 50,
+            },
+        }
+        assert expected == data["records"]
+        assert data["is_group_by"] is True
+
     @patch("superset.viz.BaseViz.query_obj")
     def test_query_obj_throws_metrics_and_groupby(self, super_query_obj):
         datasource = self.get_datasource_mock()
