Author: jvs
Date: Thu Dec  1 20:27:42 2011
New Revision: 1209226

URL: http://svn.apache.org/viewvc?rev=1209226&view=rev
Log:
HIVE-2253. Failure to merge join trees in an exceptional case
(Navis Ryu via jvs)


Added:
    hive/trunk/ql/src/test/queries/clientpositive/mergejoins.q
    hive/trunk/ql/src/test/results/clientpositive/mergejoins.q.out
Modified:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java

Modified: 
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: 
http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1209226&r1=1209225&r2=1209226&view=diff
==============================================================================
--- 
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
(original)
+++ 
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
Thu Dec  1 20:27:42 2011
@@ -5435,8 +5435,12 @@ public class SemanticAnalyzer extends Ba
           root = root.getJoinSrc();
         }
       } else {
-        parent = parent.getJoinSrc();
-        root = parent.getJoinSrc();
+        if (merged) {
+          root = root.getJoinSrc();
+        } else {
+          parent = parent.getJoinSrc();
+          root = parent.getJoinSrc();
+        }
       }
     }
   }

Added: hive/trunk/ql/src/test/queries/clientpositive/mergejoins.q
URL: 
http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/mergejoins.q?rev=1209226&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/mergejoins.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/mergejoins.q Thu Dec  1 
20:27:42 2011
@@ -0,0 +1,7 @@
+create table a (val1 int, val2 int);
+create table b (val1 int, val2 int);
+create table c (val1 int, val2 int);
+create table d (val1 int, val2 int);
+create table e (val1 int, val2 int);
+
+explain select * from a join b on a.val1=b.val1 join c on a.val1=c.val1 join d 
on a.val1=d.val1 join e on a.val2=e.val2;
\ No newline at end of file

Added: hive/trunk/ql/src/test/results/clientpositive/mergejoins.q.out
URL: 
http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/mergejoins.q.out?rev=1209226&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/mergejoins.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/mergejoins.q.out Thu Dec  1 
20:27:42 2011
@@ -0,0 +1,220 @@
+PREHOOK: query: create table a (val1 int, val2 int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table a (val1 int, val2 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@a
+PREHOOK: query: create table b (val1 int, val2 int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table b (val1 int, val2 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@b
+PREHOOK: query: create table c (val1 int, val2 int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table c (val1 int, val2 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@c
+PREHOOK: query: create table d (val1 int, val2 int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table d (val1 int, val2 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@d
+PREHOOK: query: create table e (val1 int, val2 int)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table e (val1 int, val2 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@e
+PREHOOK: query: explain select * from a join b on a.val1=b.val1 join c on 
a.val1=c.val1 join d on a.val1=d.val1 join e on a.val2=e.val2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from a join b on a.val1=b.val1 join c on 
a.val1=c.val1 join d on a.val1=d.val1 join e on a.val2=e.val2
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_JOIN (TOK_JOIN (TOK_TABREF 
(TOK_TABNAME a)) (TOK_TABREF (TOK_TABNAME b)) (= (. (TOK_TABLE_OR_COL a) val1) 
(. (TOK_TABLE_OR_COL b) val1))) (TOK_TABREF (TOK_TABNAME c)) (= (. 
(TOK_TABLE_OR_COL a) val1) (. (TOK_TABLE_OR_COL c) val1))) (TOK_TABREF 
(TOK_TABNAME d)) (= (. (TOK_TABLE_OR_COL a) val1) (. (TOK_TABLE_OR_COL d) 
val1))) (TOK_TABREF (TOK_TABNAME e)) (= (. (TOK_TABLE_OR_COL a) val2) (. 
(TOK_TABLE_OR_COL e) val2)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR 
TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        a 
+          TableScan
+            alias: a
+            Reduce Output Operator
+              key expressions:
+                    expr: val1
+                    type: int
+              sort order: +
+              Map-reduce partition columns:
+                    expr: val1
+                    type: int
+              tag: 0
+              value expressions:
+                    expr: val1
+                    type: int
+                    expr: val2
+                    type: int
+        b 
+          TableScan
+            alias: b
+            Reduce Output Operator
+              key expressions:
+                    expr: val1
+                    type: int
+              sort order: +
+              Map-reduce partition columns:
+                    expr: val1
+                    type: int
+              tag: 1
+              value expressions:
+                    expr: val1
+                    type: int
+                    expr: val2
+                    type: int
+        c 
+          TableScan
+            alias: c
+            Reduce Output Operator
+              key expressions:
+                    expr: val1
+                    type: int
+              sort order: +
+              Map-reduce partition columns:
+                    expr: val1
+                    type: int
+              tag: 2
+              value expressions:
+                    expr: val1
+                    type: int
+                    expr: val2
+                    type: int
+        d 
+          TableScan
+            alias: d
+            Reduce Output Operator
+              key expressions:
+                    expr: val1
+                    type: int
+              sort order: +
+              Map-reduce partition columns:
+                    expr: val1
+                    type: int
+              tag: 3
+              value expressions:
+                    expr: val1
+                    type: int
+                    expr: val2
+                    type: int
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+               Inner Join 0 to 2
+               Inner Join 0 to 3
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1}
+            1 {VALUE._col0} {VALUE._col1}
+            2 {VALUE._col0} {VALUE._col1}
+            3 {VALUE._col0} {VALUE._col1}
+          handleSkewJoin: false
+          outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, 
_col13
+          File Output Operator
+            compressed: false
+            GlobalTableId: 0
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+  Stage: Stage-2
+    Map Reduce
+      Alias -> Map Operator Tree:
+        $INTNAME 
+            Reduce Output Operator
+              key expressions:
+                    expr: _col1
+                    type: int
+              sort order: +
+              Map-reduce partition columns:
+                    expr: _col1
+                    type: int
+              tag: 0
+              value expressions:
+                    expr: _col12
+                    type: int
+                    expr: _col13
+                    type: int
+                    expr: _col4
+                    type: int
+                    expr: _col5
+                    type: int
+                    expr: _col8
+                    type: int
+                    expr: _col9
+                    type: int
+                    expr: _col0
+                    type: int
+                    expr: _col1
+                    type: int
+        e 
+          TableScan
+            alias: e
+            Reduce Output Operator
+              key expressions:
+                    expr: val2
+                    type: int
+              sort order: +
+              Map-reduce partition columns:
+                    expr: val2
+                    type: int
+              tag: 1
+              value expressions:
+                    expr: val1
+                    type: int
+                    expr: val2
+                    type: int
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {VALUE._col5} 
{VALUE._col8} {VALUE._col9} {VALUE._col12} {VALUE._col13}
+            1 {VALUE._col0} {VALUE._col1}
+          handleSkewJoin: false
+          outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, 
_col13, _col16, _col17
+          Select Operator
+            expressions:
+                  expr: _col12
+                  type: int
+                  expr: _col13
+                  type: int
+                  expr: _col4
+                  type: int
+                  expr: _col5
+                  type: int
+                  expr: _col8
+                  type: int
+                  expr: _col9
+                  type: int
+                  expr: _col0
+                  type: int
+                  expr: _col1
+                  type: int
+                  expr: _col16
+                  type: int
+                  expr: _col17
+                  type: int
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col9
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+


Reply via email to