This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch dev-1.1.2
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/dev-1.1.2 by this push:
     new 228715497c [fix](join) fix join reorder bug, pick from master (#12245)
228715497c is described below

commit 228715497c16931046d102734c6e1fb5204c3472
Author: starocean999 <[email protected]>
AuthorDate: Thu Sep 1 16:34:23 2022 +0800

    [fix](join) fix join reorder bug, pick from master (#12245)
---
 .../java/org/apache/doris/analysis/FromClause.java | 54 ++++++++++++++++++++--
 .../java/org/apache/doris/analysis/SelectStmt.java | 12 ++++-
 2 files changed, 60 insertions(+), 6 deletions(-)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/FromClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/FromClause.java
index fa6242f01f..c24e5a5da8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/FromClause.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/FromClause.java
@@ -51,12 +51,24 @@ public class FromClause implements ParseNode, Iterable<TableRef> {
     private boolean analyzed_ = false;
     private boolean needToSql = false;
 
+    // the tables positions may be changed by 'join reorder' optimization
+    // after reset, the original order information is lost
+    // in the next re-analyze phase, the mis-ordered tables may lead to 'unable to find column xxx' error
+    // now we use originalTableRefOrders to keep track of table order information
+    // so that in reset method, we can recover the original table orders.
+    private final ArrayList<TableRef> originalTableRefOrders = new ArrayList<TableRef>();
+
     public FromClause(List<TableRef> tableRefs) {
         tableRefs_ = Lists.newArrayList(tableRefs);
         // Set left table refs to ensure correct toSql() before analysis.
         for (int i = 1; i < tableRefs_.size(); ++i) {
             tableRefs_.get(i).setLeftTblRef(tableRefs_.get(i - 1));
         }
+        // save the tableRef's order, will use in reset method later
+        originalTableRefOrders.clear();
+        for (int i = 0; i < tableRefs_.size(); ++i) {
+            originalTableRefOrders.add(tableRefs_.get(i));
+        }
     }
 
     public FromClause() { tableRefs_ = Lists.newArrayList(); }
@@ -156,6 +168,12 @@ public class FromClause implements ParseNode, Iterable<TableRef> {
         checkFromHiveTable(analyzer);
 
         analyzed_ = true;
+
+        // save the tableRef's order, will use in reset method later
+        originalTableRefOrders.clear();
+        for (int i = 0; i < tableRefs_.size(); ++i) {
+            originalTableRefOrders.add(tableRefs_.get(i));
+        }
     }
 
     private void checkExternalTable(Analyzer analyzer) throws UserException {
@@ -210,7 +228,12 @@ public class FromClause implements ParseNode, Iterable<TableRef> {
         for (TableRef tblRef : tableRefs_) {
             clone.add(tblRef.clone());
         }
-        return new FromClause(clone);
+
+        FromClause result = new FromClause(clone);
+        for (int i = 0; i < clone.size(); ++i) {
+            result.originalTableRefOrders.add(clone.get(i));
+        }
+        return result;
     }
 
     public void reset() {
@@ -231,6 +254,10 @@ public class FromClause implements ParseNode, Iterable<TableRef> {
             // }
             get(i).reset();
         }
+        // recover original table orders
+        for (int i = 0; i < size(); ++i) {
+            tableRefs_.set(i, originalTableRefOrders.get(i));
+        }
         this.analyzed_ = false;
     }
 
@@ -252,8 +279,27 @@ public class FromClause implements ParseNode, Iterable<TableRef> {
     public Iterator<TableRef> iterator() { return tableRefs_.iterator(); }
     public int size() { return tableRefs_.size(); }
     public TableRef get(int i) { return tableRefs_.get(i); }
-    public void set(int i, TableRef tableRef) { tableRefs_.set(i, tableRef); }
-    public void add(TableRef t) { tableRefs_.add(t); }
-    public void addAll(List<TableRef> t) { tableRefs_.addAll(t); }
+    public void set(int i, TableRef tableRef) {
+        tableRefs_.set(i, tableRef);
+        originalTableRefOrders.set(i, tableRef);
+    }
+    public void add(TableRef t) { 
+        tableRefs_.add(t); 
+        // join reorder will call add method after call clear method.
+        // we want to keep tableRefPositions unchanged in that case
+        // in other cases, tableRefs_.size() would larger than tableRefPositions.size()
+        // then we can update tableRefPositions. same logic in addAll method.
+        if (tableRefs_.size() > originalTableRefOrders.size()) {
+            originalTableRefOrders.add(t);
+        }
+    }
+    public void addAll(List<TableRef> t) { 
+        tableRefs_.addAll(t); 
+        if (tableRefs_.size() > originalTableRefOrders.size()) {
+            for (int i = originalTableRefOrders.size(); i < tableRefs_.size(); ++i) {
+                originalTableRefOrders.add(tableRefs_.get(i));
+            }
+        }
+    }
     public void clear() { tableRefs_.clear(); }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
index e93abce7fc..81df81d7f7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
@@ -737,12 +737,12 @@ public class SelectStmt extends QueryStmt {
 
     protected void reorderTable(Analyzer analyzer) throws AnalysisException {
         List<Pair<TableRef, Long>> candidates = Lists.newArrayList();
-        List<TableRef> originOrderBackUp = Lists.newArrayList(fromClause_.getTableRefs());
+        ArrayList<TableRef> originOrderBackUp = Lists.newArrayList(fromClause_.getTableRefs());
         // New pair of table ref and row count
         for (TableRef tblRef : fromClause_) {
             if (tblRef.getJoinOp() != JoinOperator.INNER_JOIN || tblRef.hasJoinHints()) {
                 // Unsupported reorder outer join
-                return;
+                break;
             }
             long rowCount = 0;
             if (tblRef.getTable().getType() == TableType.OLAP) {
@@ -751,6 +751,11 @@ public class SelectStmt extends QueryStmt {
             }
             candidates.add(new Pair(tblRef, rowCount));
         }
+        int reorderTableCount = candidates.size();
+        if (reorderTableCount < originOrderBackUp.size()) {
+            fromClause_.clear();
+            fromClause_.addAll(originOrderBackUp.subList(0, reorderTableCount));
+        }
         // give InlineView row count
         long last = 0;
         for (int i = candidates.size() - 1; i >= 0; --i) {
@@ -769,6 +774,9 @@ public class SelectStmt extends QueryStmt {
                 // as long as one scheme success, we return this scheme immediately.
                 // in this scheme, candidate.first will be consider to be the big table in star schema.
                 // this scheme might not be fit for snowflake schema.
+                if (reorderTableCount < originOrderBackUp.size()) {
+                    fromClause_.addAll(originOrderBackUp.subList(reorderTableCount, originOrderBackUp.size()));
+                }
                 return;
             }
         }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to