http://git-wip-us.apache.org/repos/asf/hive/blob/1f258e96/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out
new file mode 100644
index 0000000..8519ff3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out
@@ -0,0 +1,1256 @@
+PREHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table1
+POSTHOOK: query: -- Create two bucketed and sorted tables
+CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table1
+PREHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table2
+POSTHOOK: query: CREATE TABLE test_table2 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table2
+PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_table3
+POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table3
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT * where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1@ds=1
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table2@ds=1
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT * where key < 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table2@ds=1
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '2') SELECT * where key < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table1@ds=2
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '2') SELECT * where key < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table1@ds=2
+POSTHOOK: Lineage: test_table1 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table1 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * where key < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@test_table2@ds=2
+POSTHOOK: query: FROM src
+INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT * where key < 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@test_table2@ds=2
+POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table
+-- This should be a map-only operation
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col4
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), concat(_col1, _col4) (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+2      val_2val_2      1
+4      val_4val_4      1
+8      val_8val_8      1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+9      val_9val_9      1
+PREHOOK: query: -- Since more than one partition of 'a' (the big table) is being selected,
+-- it should be a map-reduce job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds is not null and b.ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since more than one partition of 'a' (the big table) is being selected,
+-- it should be a map-reduce job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds is not null and b.ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 20 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 20 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 20 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 20 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col4
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), concat(_col1, _col4) (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds is not null and b.ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table1@ds=2
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds is not null and b.ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table1@ds=2
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+2      val_2val_2      1
+2      val_2val_2      1
+4      val_4val_4      1
+4      val_4val_4      1
+8      val_8val_8      1
+8      val_8val_8      1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+9      val_9val_9      1
+9      val_9val_9      1
+PREHOOK: query: -- Since a single partition of the big table ('a') is being selected, it should be a map-only
+-- job even though multiple partitions of 'b' are being selected
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds is not null
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since a single partition of the big table ('a') is being selected, it should be a map-only
+-- job even though multiple partitions of 'b' are being selected
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds is not null
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 168 Data size: 2816 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 168 Data size: 2816 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 168 Data size: 2816 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col4
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 184 Data size: 3097 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), concat(_col1, _col4) (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 184 Data size: 3097 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 184 Data size: 3097 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 184 Data size: 3097 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 184 Data size: 3097 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Input: default@test_table2@ds=2
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM test_table1 a JOIN test_table2 b 
+ON a.key = b.key WHERE a.ds = '1' and b.ds is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Input: default@test_table2@ds=2
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)a.FieldSchema(name:value, type:string, comment:null), (test_table2)b.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+2      val_2val_2      1
+2      val_2val_2      1
+4      val_4val_4      1
+4      val_4val_4      1
+8      val_8val_8      1
+8      val_8val_8      1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+9      val_9val_9      1
+9      val_9val_9      1
+PREHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table1
+                  Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table2
+                  Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col3
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), concat(_col1, _col3) (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+2      val_2val_2      1
+4      val_4val_4      1
+8      val_8val_8      1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+5      val_5val_5      1
+9      val_9val_9      1
+PREHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.v1, b.v2) 
+FROM 
+(select key, concat(value, value) as v1 from test_table1 where ds = '1') a 
+JOIN 
+(select key, concat(value, value) as v2 from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This should be a map-only job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.v1, b.v2) 
+FROM 
+(select key, concat(value, value) as v1 from test_table1 where ds = '1') a 
+JOIN 
+(select key, concat(value, value) as v2 from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table1
+                  Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), concat(value, value) (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table2
+                  Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), concat(value, value) (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col3
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: int), concat(_col1, _col3) (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.v1, b.v2) 
+FROM 
+(select key, concat(value, value) as v1 from test_table1 where ds = '1') a 
+JOIN 
+(select key, concat(value, value) as v2 from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key, concat(a.v1, b.v2) 
+FROM 
+(select key, concat(value, value) as v1 from test_table1 where ds = '1') a 
+JOIN 
+(select key, concat(value, value) as v2 from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0      val_0val_0val_0val_0    1
+0      val_0val_0val_0val_0    1
+0      val_0val_0val_0val_0    1
+0      val_0val_0val_0val_0    1
+0      val_0val_0val_0val_0    1
+0      val_0val_0val_0val_0    1
+0      val_0val_0val_0val_0    1
+0      val_0val_0val_0val_0    1
+0      val_0val_0val_0val_0    1
+2      val_2val_2val_2val_2    1
+4      val_4val_4val_4val_4    1
+8      val_8val_8val_8val_8    1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+5      val_5val_5val_5val_5    1
+5      val_5val_5val_5val_5    1
+5      val_5val_5val_5val_5    1
+5      val_5val_5val_5val_5    1
+5      val_5val_5val_5val_5    1
+5      val_5val_5val_5val_5    1
+5      val_5val_5val_5val_5    1
+5      val_5val_5val_5val_5    1
+5      val_5val_5val_5val_5    1
+9      val_9val_9val_9val_9    1
+PREHOOK: query: -- This should be a map-reduce job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key+a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- This should be a map-reduce job
+EXPLAIN
+INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key+a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table1
+                  Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 10 Data size: 150 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: test_table2
+                  Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 84 Data size: 1408 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        outputColumnNames: _col0, _col1, _col3
+                        input vertices:
+                          0 Map 1
+                        Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                        Select Operator
+                          expressions: (_col0 + _col0) (type: int), concat(_col1, _col3) (type: string)
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: int)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: int)
+                            Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 92 Data size: 1548 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table3
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 1
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test_table3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key+a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table1
+PREHOOK: Input: default@test_table1@ds=1
+PREHOOK: Input: default@test_table2
+PREHOOK: Input: default@test_table2@ds=1
+PREHOOK: Output: default@test_table3@ds=1
+POSTHOOK: query: INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1')
+SELECT a.key+a.key, concat(a.value, b.value) 
+FROM 
+(select key, value from test_table1 where ds = '1') a 
+JOIN 
+(select key, value from test_table2 where ds = '1') b 
+ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table1
+POSTHOOK: Input: default@test_table1@ds=1
+POSTHOOK: Input: default@test_table2
+POSTHOOK: Input: default@test_table2@ds=1
+POSTHOOK: Output: default@test_table3@ds=1
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key EXPRESSION [(test_table1)test_table1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value EXPRESSION [(test_table1)test_table1.FieldSchema(name:value, type:string, comment:null), (test_table2)test_table2.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 1 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+0      val_0val_0      1
+4      val_2val_2      1
+8      val_4val_4      1
+10     val_5val_5      1
+10     val_5val_5      1
+10     val_5val_5      1
+10     val_5val_5      1
+10     val_5val_5      1
+10     val_5val_5      1
+10     val_5val_5      1
+10     val_5val_5      1
+10     val_5val_5      1
+16     val_8val_8      1
+18     val_9val_9      1
+PREHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_table3
+PREHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from test_table3 tablesample (bucket 2 out of 2) s where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_table3
+POSTHOOK: Input: default@test_table3@ds=1
+#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/1f258e96/ql/src/test/results/clientpositive/llap/cbo_rp_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_gby.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_gby.q.out
new file mode 100644
index 0000000..04597a7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/cbo_rp_gby.q.out
@@ -0,0 +1,124 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 6. Test Select + TS + Join + Fil + GB + GB Having
+select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, 
cbo_t1.c_int, key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+-- 6. Test Select + TS + Join + Fil + GB + GB Having
+select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, 
cbo_t1.c_int, key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+ 1     4       2
+ 1     4       2
+1      4       12
+1      4       2
+NULL   NULL    NULL
+PREHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as 
x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by 
y, x
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) 
as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group 
by y, x
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+#### A masked pattern was here ####
+5.0    12      1
+5.0    2       3
+NULL   NULL    1
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key 
order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from 
cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 
0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on 
cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or 
c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key 
order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from 
cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 
0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on 
cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or 
c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1      12      6
+1      2       6
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, 
c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key  
having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + 
cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c  having 
cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0  order by 
cbo_t3.c_int % c asc, cbo_t3.c_int desc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, 
c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and 
(cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key  
having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + 
cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c  having 
cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0  order by 
cbo_t3.c_int % c asc, cbo_t3.c_int desc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1      12      6
+1      2       6
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as 
q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int 
> 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having 
cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) 
cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + 
cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as 
q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int 
> 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having 
cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) 
cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + 
cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1      12      6
+1      2       6
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, 
sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 
or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having 
cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on 
cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by 
cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + 
c) >= 0 order by cbo_t3.c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, 
sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 
or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having 
cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 
order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on 
cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by 
cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + 
c) >= 0 order by cbo_t3.c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1      12      6
+1      2       6
+PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 
where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  
group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or 
c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on 
cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by 
cbo_t3.c_int, c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t1
+PREHOOK: Input: default@cbo_t1@dt=2014
+PREHOOK: Input: default@cbo_t2
+PREHOOK: Input: default@cbo_t2@dt=2014
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, 
c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and 
(cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key 
having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) 
>= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 
where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  
group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or 
c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on 
cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by 
cbo_t3.c_int, c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t1
+POSTHOOK: Input: default@cbo_t1@dt=2014
+POSTHOOK: Input: default@cbo_t2
+POSTHOOK: Input: default@cbo_t2@dt=2014
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1      12      6
+1      2       6
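The cbo_rp_gby.q.out block above covers group-by over derived columns; presumably the .q file enables hive.cbo.returnpath.hiveop (an assumption, since only the golden output is shown here), and the -- SORT_QUERY_RESULTS directive at the top makes the row order in the expected output deterministic. A simplified sketch of the nested group-by shape these queries share:

    -- Inner query: one aggregate row per (c_float, c_int, key) group.
    -- Outer query: regroups those aggregate rows by the derived columns x, y.
    SELECT x, y, count(*)
    FROM (SELECT key,
                 (c_int + c_float + 1 + 2) AS x,
                 sum(c_int)                AS y
          FROM cbo_t1
          GROUP BY c_float, cbo_t1.c_int, key) R
    GROUP BY y, x;

The longer queries in the block follow the same shape, adding inner, left, right, and full outer joins between aggregated subqueries plus HAVING and ORDER BY clauses on the derived columns.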
