This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-4.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-4.0 by this push:
     new 840ce2d4f59 branch-4.0: [fix](query-cache) include variant subcolumn path in query cache digest #61709 (#61718)
840ce2d4f59 is described below

commit 840ce2d4f59f3c8f122bff139f761030e2c97ec3
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Thu Mar 26 18:29:20 2026 +0800

    branch-4.0: [fix](query-cache) include variant subcolumn path in query cache digest #61709 (#61718)
    
    Cherry-picked from #61709
    
    Co-authored-by: 924060929 <[email protected]>
---
 .../org/apache/doris/planner/OlapScanNode.java     | 11 +++++++++-
 .../doris/planner/QueryCacheNormalizerTest.java    | 25 +++++++++++++++++++++-
 2 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java
index 494edc9e109..6c14cb7a054 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/OlapScanNode.java
@@ -1270,7 +1270,16 @@ public class OlapScanNode extends ScanNode {
                 .flatMap(tupleId -> normalizer.getDescriptorTable().getTupleDesc(tupleId).getSlots().stream())
                 .collect(Collectors.toList());
         List<Pair<SlotId, String>> selectColumns = slots.stream()
-                .map(slot -> Pair.of(slot.getId(), slot.getColumn().getName()))
+                .map(slot -> {
+                    // For variant subcolumns, use the materialized column name (e.g. "data.int_1")
+                    // to distinguish different subcolumns of the same variant column in cache digest.
+                    List<String> subColPath = slot.getSubColLables();
+                    String colName = slot.getColumn().getName();
+                    if (subColPath != null && !subColPath.isEmpty()) {
+                        colName = colName + "." + String.join(".", subColPath);
+                    }
+                    return Pair.of(slot.getId(), colName);
+                })
                 .collect(Collectors.toList());
         for (Column partitionColumn : olapTable.getPartitionInfo().getPartitionColumns()) {
             boolean selectPartitionColumn = false;
diff --git a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java
index 5648484f49d..5e618797188 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/planner/QueryCacheNormalizerTest.java
@@ -110,7 +110,14 @@ public class QueryCacheNormalizerTest extends TestWithFeService {
                 + "distributed by hash(k1) buckets 3\n"
                 + "properties('replication_num' = '1')";
 
-        createTables(nonPart, part1, part2, multiLeveParts);
+        String variantTable = "create table db1.variant_tbl("
+                + "  k1 int,\n"
+                + "  data variant)\n"
+                + "DUPLICATE KEY(k1)\n"
+                + "distributed by hash(k1) buckets 3\n"
+                + "properties('replication_num' = '1')";
+
+        createTables(nonPart, part1, part2, multiLeveParts, variantTable);
 
         
connectContext.getSessionVariable().setDisableNereidsRules("PRUNE_EMPTY_PARTITION");
         connectContext.getSessionVariable().setEnableQueryCache(true);
@@ -358,6 +365,22 @@ public class QueryCacheNormalizerTest extends TestWithFeService {
         Assertions.assertEquals(fourPhaseAggPlans, threePhaseAggPlans);
     }
 
+    @Test
+    public void testVariantSubColumnDigest() throws Exception {
+        // Different variant subcolumns should produce different digests
+        String digest1 = getDigest(
+                "select cast(data['int_1'] as int), count(*) from db1.variant_tbl group by cast(data['int_1'] as int)");
+        String digest2 = getDigest(
+                "select cast(data['int_nested'] as int), count(*) from db1.variant_tbl group by cast(data['int_nested'] as int)");
+        Assertions.assertNotEquals(digest1, digest2,
+                "Queries on different variant subcolumns must have different cache digests");
+
+        // Same variant subcolumn with different aliases should produce same digest
+        String digest3 = getDigest(
+                "select cast(data['int_1'] as int) as a, count(*) as cnt from 
db1.variant_tbl group by cast(data['int_1'] as int)");
+        Assertions.assertEquals(digest1, digest3);
+    }
+
     private String getDigest(String sql) throws Exception {
         return Hex.encodeHexString(getQueryCacheParam(sql).digest);
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to