Repository: hive
Updated Branches:
  refs/heads/master dd0bc33d1 -> e213c4cee


http://git-wip-us.apache.org/repos/asf/hive/blob/e213c4ce/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_3.q.out b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_3.q.out
index 7059647..3b53ec7 100644
--- a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_3.q.out
@@ -365,10 +365,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [4:part_col (int)]
-                            partition key expr: [part_col]
+                            Target Columns: [Map 4 -> [part_col:int (part_col)]]
                             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 4]
             Local Work:
               Map Reduce Local Work
 
@@ -417,10 +415,8 @@ STAGE PLANS:
                               outputColumnNames: _col0
                               Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                               Spark Partition Pruning Sink Operator
-                                Target column: [1:part_col (int)]
-                                partition key expr: [part_col]
+                                Target Columns: [Map 1 -> [part_col:int (part_col)]]
                                 Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
-                                target works: [Map 1]
             Local Work:
               Map Reduce Local Work
 
@@ -558,10 +554,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [1:part_col (int)]
-                            partition key expr: [part_col]
+                            Target Columns: [Map 1 -> [part_col:int (part_col)]]
                             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Local Work:
               Map Reduce Local Work
         Map 4 
@@ -591,10 +585,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [1:part_col (int)]
-                            partition key expr: [part_col]
+                            Target Columns: [Map 1 -> [part_col:int (part_col)]]
                             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Local Work:
               Map Reduce Local Work
 
@@ -734,10 +726,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [1:part_col (int)]
-                            partition key expr: [part_col]
+                            Target Columns: [Map 1 -> [part_col:int (part_col)]]
                             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Local Work:
               Map Reduce Local Work
 
@@ -771,10 +761,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [5:part_col (int)]
-                            partition key expr: [part_col]
+                            Target Columns: [Map 5 -> [part_col:int (part_col)]]
                             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 5]
             Local Work:
               Map Reduce Local Work
 
@@ -954,10 +942,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [2:part_col (int)]
-                            partition key expr: [part_col]
+                            Target Columns: [Map 2 -> [part_col:int (part_col)]]
                             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 2]
             Local Work:
               Map Reduce Local Work
 
@@ -1088,10 +1074,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [2:part_col (int)]
-                            partition key expr: [part_col]
+                            Target Columns: [Map 2 -> [part_col:int (part_col)]]
                             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 2]
             Local Work:
               Map Reduce Local Work
 
@@ -1235,10 +1219,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 6 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [1:part_col (int)]
-                            partition key expr: [part_col]
+                            Target Columns: [Map 1 -> [part_col:int (part_col)]]
                             Statistics: Num rows: 6 Data size: 6 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Local Work:
               Map Reduce Local Work
 
@@ -1374,10 +1356,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 18 Data size: 18 Basic stats: COMPLETE Column stats: NONE
                         Spark Partition Pruning Sink Operator
-                          Target column: [1:part_col1 (int)]
-                          partition key expr: [part_col1]
+                          Target Columns: [Map 1 -> [part_col1:int (part_col1)]]
                           Statistics: Num rows: 18 Data size: 18 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 1]
                     Select Operator
                       expressions: _col1 (type: int)
                       outputColumnNames: _col0
@@ -1388,10 +1368,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 18 Data size: 18 Basic stats: COMPLETE Column stats: NONE
                         Spark Partition Pruning Sink Operator
-                          Target column: [1:part_col2 (int)]
-                          partition key expr: [part_col2]
+                          Target Columns: [Map 1 -> [part_col2:int (part_col2)]]
                           Statistics: Num rows: 18 Data size: 18 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 1]
             Local Work:
               Map Reduce Local Work
 
@@ -1527,10 +1505,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [3:part_col (int)]
-                            partition key expr: [part_col]
+                            Target Columns: [Map 3 -> [part_col:int (part_col)]]
                             Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 3]
             Local Work:
               Map Reduce Local Work
         Map 2 
@@ -1560,10 +1536,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [3:part_col (int)]
-                            partition key expr: [part_col]
+                            Target Columns: [Map 3 -> [part_col:int (part_col)]]
                             Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 3]
             Local Work:
               Map Reduce Local Work
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e213c4ce/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_4.q.out b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_4.q.out
index 685e874..7bc8b4f 100644
--- a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_4.q.out
@@ -138,10 +138,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [3:p (string), 6:p (string)]
-                            partition key expr: [p, p]
+                            Target Columns: [Map 3 -> [p:string (p)], Map 6 -> [p:string (p)]]
                             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 3, Map 6]
 
   Stage: Stage-1
     Spark
@@ -368,10 +366,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [3:p (string), 6:q (string)]
-                            partition key expr: [p, q]
+                            Target Columns: [Map 3 -> [p:string (p)], Map 6 -> [q:string (q)]]
                             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 3, Map 6]
 
   Stage: Stage-1
     Spark
@@ -598,10 +594,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [3:q (string)]
-                            partition key expr: [q]
+                            Target Columns: [Map 3 -> [q:string (q)]]
                             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 3]
 
   Stage: Stage-1
     Spark
@@ -761,10 +755,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [3:p (string)]
-                            partition key expr: [p]
+                            Target Columns: [Map 3 -> [p:string (p)]]
                             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 3]
         Map 8 
             Map Operator Tree:
                 TableScan
@@ -787,10 +779,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [6:p (string)]
-                            partition key expr: [p]
+                            Target Columns: [Map 6 -> [p:string (p)]]
                             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 6]
 
   Stage: Stage-1
     Spark
@@ -950,10 +940,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [3:p (string)]
-                            partition key expr: [p]
+                            Target Columns: [Map 3 -> [p:string (p)]]
                             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 3]
         Map 8 
             Map Operator Tree:
                 TableScan
@@ -976,10 +964,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [6:p (string)]
-                            partition key expr: [p]
+                            Target Columns: [Map 6 -> [p:string (p)]]
                             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 6]
 
   Stage: Stage-1
     Spark
@@ -1159,10 +1145,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
                         Spark Partition Pruning Sink Operator
-                          Target column: [4:q (string), 8:q (string)]
-                          partition key expr: [q, q]
+                          Target Columns: [Map 4 -> [q:string (q)], Map 8 -> [q:string (q)]]
                           Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 4, Map 8]
 
   Stage: Stage-1
     Spark
@@ -1424,24 +1408,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
                         Spark Partition Pruning Sink Operator
-                          Target column: [4:q (string), 8:q (string)]
-                          partition key expr: [q, q]
+                          Target Columns: [Map 4 -> [q:string (q), p:string (p)], Map 8 -> [q:string (q), p:string (p)]]
                           Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 4, Map 8]
-                    Select Operator
-                      expressions: _col0 (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          Target column: [4:p (string), 8:p (string)]
-                          partition key expr: [p, p]
-                          Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 4, Map 8]
 
   Stage: Stage-1
     Spark
@@ -1700,24 +1668,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
                         Spark Partition Pruning Sink Operator
-                          Target column: [4:q (string)]
-                          partition key expr: [q]
-                          Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 4]
-                    Select Operator
-                      expressions: _col0 (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          Target column: [4:p (string)]
-                          partition key expr: [p]
+                          Target Columns: [Map 4 -> [q:string (q), p:string (p)]]
                           Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 4]
         Reducer 12 
             Reduce Operator Tree:
               Select Operator
@@ -1740,10 +1692,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
                         Spark Partition Pruning Sink Operator
-                          Target column: [8:p (string)]
-                          partition key expr: [p]
+                          Target Columns: [Map 8 -> [p:string (p)]]
                           Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 8]
                     Select Operator
                       expressions: _col1 (type: string)
                       outputColumnNames: _col0
@@ -1754,10 +1704,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
                         Spark Partition Pruning Sink Operator
-                          Target column: [8:q (string)]
-                          partition key expr: [q]
+                          Target Columns: [Map 8 -> [q:string (q)]]
                           Statistics: Num rows: 200 Data size: 2000 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 8]
 
   Stage: Stage-1
     Spark
@@ -1990,10 +1938,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
                         Spark Partition Pruning Sink Operator
-                          Target column: [1:p (string), 5:p (string)]
-                          partition key expr: [p, p]
+                          Target Columns: [Map 1 -> [p:string (p)], Map 5 -> [p:string (p)]]
                           Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 1, Map 5]
         Reducer 16 
             Reduce Operator Tree:
               Group By Operator
@@ -2019,10 +1965,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                         Spark Partition Pruning Sink Operator
-                          Target column: [5:p (string)]
-                          partition key expr: [p]
+                          Target Columns: [Map 5 -> [p:string (p)]]
                           Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 5]
 
   Stage: Stage-1
     Spark

http://git-wip-us.apache.org/repos/asf/hive/blob/e213c4ce/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_5.q.out b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_5.q.out
index 189a43b..70747b9 100644
--- a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_5.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_5.q.out
@@ -82,10 +82,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [1:p (string)]
-                            partition key expr: [p]
+                            Target Columns: [Map 1 -> [p:string (p)]]
                             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
 
   Stage: Stage-1
     Spark
@@ -226,10 +224,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [1:p (string)]
-                            partition key expr: [p]
+                            Target Columns: [Map 1 -> [p:string (p)]]
                             Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Local Work:
               Map Reduce Local Work
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e213c4ce/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_6.q.out b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_6.q.out
new file mode 100644
index 0000000..ff07cb8
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_6.q.out
@@ -0,0 +1,594 @@
+PREHOOK: query: create table part_table_1 (col int) partitioned by (part_col int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@part_table_1
+POSTHOOK: query: create table part_table_1 (col int) partitioned by (part_col int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@part_table_1
+PREHOOK: query: create table part_table_2 (col int) partitioned by (part_col int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@part_table_2
+POSTHOOK: query: create table part_table_2 (col int) partitioned by (part_col int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@part_table_2
+PREHOOK: query: create table part_table_3 (col int) partitioned by (part_col1 int, part_col2 int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@part_table_3
+POSTHOOK: query: create table part_table_3 (col int) partitioned by (part_col1 int, part_col2 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@part_table_3
+PREHOOK: query: create table regular_table (col int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@regular_table
+POSTHOOK: query: create table regular_table (col int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@regular_table
+PREHOOK: query: insert into table regular_table values (1)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@regular_table
+POSTHOOK: query: insert into table regular_table values (1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@regular_table
+POSTHOOK: Lineage: regular_table.col SCRIPT []
+PREHOOK: query: alter table part_table_1 add partition (part_col=1)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@part_table_1
+POSTHOOK: query: alter table part_table_1 add partition (part_col=1)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@part_table_1
+POSTHOOK: Output: default@part_table_1@part_col=1
+PREHOOK: query: insert into table part_table_1 partition (part_col=1) values (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@part_table_1@part_col=1
+POSTHOOK: query: insert into table part_table_1 partition (part_col=1) values (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@part_table_1@part_col=1
+POSTHOOK: Lineage: part_table_1 PARTITION(part_col=1).col SCRIPT []
+PREHOOK: query: alter table part_table_1 add partition (part_col=2)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@part_table_1
+POSTHOOK: query: alter table part_table_1 add partition (part_col=2)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@part_table_1
+POSTHOOK: Output: default@part_table_1@part_col=2
+PREHOOK: query: insert into table part_table_1 partition (part_col=2) values (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@part_table_1@part_col=2
+POSTHOOK: query: insert into table part_table_1 partition (part_col=2) values (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@part_table_1@part_col=2
+POSTHOOK: Lineage: part_table_1 PARTITION(part_col=2).col SCRIPT []
+PREHOOK: query: alter table part_table_1 add partition (part_col=3)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@part_table_1
+POSTHOOK: query: alter table part_table_1 add partition (part_col=3)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@part_table_1
+POSTHOOK: Output: default@part_table_1@part_col=3
+PREHOOK: query: insert into table part_table_1 partition (part_col=3) values (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@part_table_1@part_col=3
+POSTHOOK: query: insert into table part_table_1 partition (part_col=3) values (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@part_table_1@part_col=3
+POSTHOOK: Lineage: part_table_1 PARTITION(part_col=3).col SCRIPT []
+PREHOOK: query: alter table part_table_2 add partition (part_col=1)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@part_table_2
+POSTHOOK: query: alter table part_table_2 add partition (part_col=1)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@part_table_2
+POSTHOOK: Output: default@part_table_2@part_col=1
+PREHOOK: query: insert into table part_table_2 partition (part_col=1) values (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@part_table_2@part_col=1
+POSTHOOK: query: insert into table part_table_2 partition (part_col=1) values (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@part_table_2@part_col=1
+POSTHOOK: Lineage: part_table_2 PARTITION(part_col=1).col SCRIPT []
+PREHOOK: query: alter table part_table_2 add partition (part_col=2)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@part_table_2
+POSTHOOK: query: alter table part_table_2 add partition (part_col=2)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@part_table_2
+POSTHOOK: Output: default@part_table_2@part_col=2
+PREHOOK: query: insert into table part_table_2 partition (part_col=2) values (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@part_table_2@part_col=2
+POSTHOOK: query: insert into table part_table_2 partition (part_col=2) values (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@part_table_2@part_col=2
+POSTHOOK: Lineage: part_table_2 PARTITION(part_col=2).col SCRIPT []
+PREHOOK: query: alter table part_table_3 add partition (part_col1=1, part_col2=1)
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@part_table_3
+POSTHOOK: query: alter table part_table_3 add partition (part_col1=1, part_col2=1)
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@part_table_3
+POSTHOOK: Output: default@part_table_3@part_col1=1/part_col2=1
+PREHOOK: query: insert into table part_table_3 partition (part_col1=1, part_col2=1) values (1), (2), (3), (4)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@part_table_3@part_col1=1/part_col2=1
+POSTHOOK: query: insert into table part_table_3 partition (part_col1=1, part_col2=1) values (1), (2), (3), (4)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@part_table_3@part_col1=1/part_col2=1
+POSTHOOK: Lineage: part_table_3 PARTITION(part_col1=1,part_col2=1).col SCRIPT []
+PREHOOK: query: explain
+select * from regular_table, part_table_1, part_table_2
+where regular_table.col = part_table_1.part_col and regular_table.col = part_table_2.part_col
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from regular_table, part_table_1, part_table_2
+where regular_table.col = part_table_1.part_col and regular_table.col = part_table_2.part_col
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: part_table_2
+                  Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: col (type: int), part_col (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col1 (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Spark Partition Pruning Sink Operator
+                          Target Columns: [Map 4 -> [part_col:int (part_col)]]
+                          Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: regular_table
+                  Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: col is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: col (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col0 (type: int)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                          Spark Partition Pruning Sink Operator
+                            Target Columns: [Map 1 -> [part_col:int (part_col)], Map 4 -> [part_col:int (part_col)]]
+                            Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 4), Map 3 (PARTITION-LEVEL SORT, 4), Map 4 (PARTITION-LEVEL SORT, 4)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_table_2
+                  Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: col (type: int), part_col (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col1 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col1 (type: int)
+                      Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: int)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: regular_table
+                  Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: col is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: col (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: part_table_1
+                  Statistics: Num rows: 12 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: col (type: int), part_col (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 12 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col1 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col1 (type: int)
+                      Statistics: Num rows: 12 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: int)
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Inner Join 1 to 2
+                keys:
+                  0 _col1 (type: int)
+                  1 _col0 (type: int)
+                  2 _col1 (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Statistics: Num rows: 26 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col2 (type: int), _col3 (type: int), _col4 (type: int), _col0 (type: int), _col1 (type: int)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                  Statistics: Num rows: 26 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 26 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from regular_table, part_table_1, part_table_2
+where regular_table.col = part_table_1.part_col and regular_table.col = part_table_2.part_col
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_table_1
+PREHOOK: Input: default@part_table_1@part_col=1
+PREHOOK: Input: default@part_table_1@part_col=2
+PREHOOK: Input: default@part_table_1@part_col=3
+PREHOOK: Input: default@part_table_2
+PREHOOK: Input: default@part_table_2@part_col=1
+PREHOOK: Input: default@part_table_2@part_col=2
+PREHOOK: Input: default@regular_table
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from regular_table, part_table_1, part_table_2
+where regular_table.col = part_table_1.part_col and regular_table.col = part_table_2.part_col
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_table_1
+POSTHOOK: Input: default@part_table_1@part_col=1
+POSTHOOK: Input: default@part_table_1@part_col=2
+POSTHOOK: Input: default@part_table_1@part_col=3
+POSTHOOK: Input: default@part_table_2
+POSTHOOK: Input: default@part_table_2@part_col=1
+POSTHOOK: Input: default@part_table_2@part_col=2
+POSTHOOK: Input: default@regular_table
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1      1       1       1       1
+1      1       1       2       1
+1      1       1       3       1
+1      1       1       4       1
+1      2       1       1       1
+1      2       1       2       1
+1      2       1       3       1
+1      2       1       4       1
+1      3       1       1       1
+1      3       1       2       1
+1      3       1       3       1
+1      3       1       4       1
+1      4       1       1       1
+1      4       1       2       1
+1      4       1       3       1
+1      4       1       4       1
+PREHOOK: query: explain
+select * from regular_table, part_table_1, part_table_2
+where regular_table.col = part_table_1.part_col and regular_table.col = part_table_2.part_col
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from regular_table, part_table_1, part_table_2
+where regular_table.col = part_table_1.part_col and regular_table.col = part_table_2.part_col
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_table_2
+                  Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: col (type: int), part_col (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col1 (type: int)
+                        1 _col0 (type: int)
+                        2 _col1 (type: int)
+                    Select Operator
+                      expressions: _col1 (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Spark Partition Pruning Sink Operator
+                          Target Columns: [Map 3 -> [part_col:int (part_col)]]
+                          Statistics: Num rows: 8 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Local Work:
+              Map Reduce Local Work
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: regular_table
+                  Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: col is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: col (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                      Spark HashTable Sink Operator
+                        keys:
+                          0 _col1 (type: int)
+                          1 _col0 (type: int)
+                          2 _col1 (type: int)
+                      Select Operator
+                        expressions: _col0 (type: int)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                          Spark Partition Pruning Sink Operator
+                            Target Columns: [Map 3 -> [part_col:int (part_col)]]
+                            Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: part_table_1
+                  Statistics: Num rows: 12 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: col (type: int), part_col (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 12 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                           Inner Join 1 to 2
+                      keys:
+                        0 _col1 (type: int)
+                        1 _col0 (type: int)
+                        2 _col1 (type: int)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      input vertices:
+                        0 Map 1
+                        1 Map 2
+                      Statistics: Num rows: 26 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col2 (type: int), _col3 (type: int), _col4 (type: int), _col0 (type: int), _col1 (type: int)
+                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                        Statistics: Num rows: 26 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                        File Output Operator
+                          compressed: false
+                          Statistics: Num rows: 26 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                          table:
+                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from regular_table, part_table_1, part_table_2
+where regular_table.col = part_table_1.part_col and regular_table.col = part_table_2.part_col
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_table_1
+PREHOOK: Input: default@part_table_1@part_col=1
+PREHOOK: Input: default@part_table_1@part_col=2
+PREHOOK: Input: default@part_table_1@part_col=3
+PREHOOK: Input: default@part_table_2
+PREHOOK: Input: default@part_table_2@part_col=1
+PREHOOK: Input: default@part_table_2@part_col=2
+PREHOOK: Input: default@regular_table
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from regular_table, part_table_1, part_table_2
+where regular_table.col = part_table_1.part_col and regular_table.col = part_table_2.part_col
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_table_1
+POSTHOOK: Input: default@part_table_1@part_col=1
+POSTHOOK: Input: default@part_table_1@part_col=2
+POSTHOOK: Input: default@part_table_1@part_col=3
+POSTHOOK: Input: default@part_table_2
+POSTHOOK: Input: default@part_table_2@part_col=1
+POSTHOOK: Input: default@part_table_2@part_col=2
+POSTHOOK: Input: default@regular_table
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1      1       1       1       1
+1      1       1       2       1
+1      1       1       3       1
+1      1       1       4       1
+1      2       1       1       1
+1      2       1       2       1
+1      2       1       3       1
+1      2       1       4       1
+1      3       1       1       1
+1      3       1       2       1
+1      3       1       3       1
+1      3       1       4       1
+1      4       1       1       1
+1      4       1       2       1
+1      4       1       3       1
+1      4       1       4       1
+PREHOOK: query: explain
+select * from regular_table, part_table_3
+where regular_table.col=part_table_3.part_col1 and regular_table.col=part_table_3.part_col2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from regular_table, part_table_3
+where regular_table.col=part_table_3.part_col1 and regular_table.col=part_table_3.part_col2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: regular_table
+                  Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: col is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: col (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                      Spark HashTable Sink Operator
+                        keys:
+                          0 _col0 (type: int), _col0 (type: int)
+                          1 _col2 (type: int), _col1 (type: int)
+                      Select Operator
+                        expressions: _col0 (type: int)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+                          Spark Partition Pruning Sink Operator
+                            Target Columns: [Map 2 -> [part_col2:int (part_col2), part_col1:int (part_col1)]]
+                            Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: part_table_3
+                  Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: col (type: int), part_col1 (type: int), part_col2 (type: int)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      keys:
+                        0 _col0 (type: int), _col0 (type: int)
+                        1 _col2 (type: int), _col1 (type: int)
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      input vertices:
+                        0 Map 1
+                      Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from regular_table, part_table_3
+where regular_table.col=part_table_3.part_col1 and regular_table.col=part_table_3.part_col2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part_table_3
+PREHOOK: Input: default@part_table_3@part_col1=1/part_col2=1
+PREHOOK: Input: default@regular_table
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select * from regular_table, part_table_3
+where regular_table.col=part_table_3.part_col1 and regular_table.col=part_table_3.part_col2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part_table_3
+POSTHOOK: Input: default@part_table_3@part_col1=1/part_col2=1
+POSTHOOK: Input: default@regular_table
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1      1       1       1
+1      2       1       1
+1      3       1       1
+1      4       1       1

http://git-wip-us.apache.org/repos/asf/hive/blob/e213c4ce/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_mapjoin_only.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_mapjoin_only.q.out b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_mapjoin_only.q.out
index cd566bb..cc16da2 100644
--- a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_mapjoin_only.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_mapjoin_only.q.out
@@ -213,10 +213,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1000 Data size: 24624 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [hr]
+                            Target Columns: [Map 1 -> [hr:string (hr)]]
                             Statistics: Num rows: 1000 Data size: 24624 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
 
   Stage: Stage-3
     Spark
@@ -249,10 +247,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                             Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Local Work:
               Map Reduce Local Work
 
@@ -387,10 +383,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                             Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Local Work:
               Map Reduce Local Work
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e213c4ce/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
index 11ca22a..1916d25 100644
--- a/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
@@ -323,10 +323,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                             Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -810,10 +808,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [day(ds)]
+                            Target Columns: [Map 1 -> [ds:string (day(ds))]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -1292,10 +1288,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [abs(((- UDFToLong(concat(UDFToString(day(ds)), '0'))) + 10))]
+                            Target Columns: [Map 1 -> [ds:string (abs(((- UDFToLong(concat(UDFToString(day(ds)), '0'))) + 10)))]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -1576,10 +1570,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [CAST( UDFToShort(day(ds)) AS decimal(10,0))]
+                            Target Columns: [Map 1 -> [ds:string (CAST( UDFToShort(day(ds)) AS decimal(10,0)))]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -1854,10 +1846,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -1919,10 +1909,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [hr]
+                            Target Columns: [Map 1 -> [hr:string (hr)]]
                            Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -2560,10 +2548,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
                       Select Operator
                         expressions: _col2 (type: string)
                         outputColumnNames: _col0
@@ -2585,10 +2571,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [hr]
+                            Target Columns: [Map 1 -> [hr:string (hr)]]
                            Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -3070,10 +3054,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -3557,10 +3539,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [UDFToDouble(hr)]
+                            Target Columns: [Map 1 -> [hr:string (UDFToDouble(hr))]]
                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -3833,10 +3813,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [(UDFToDouble(hr) * 2.0D)]
+                            Target Columns: [Map 1 -> [hr:string ((UDFToDouble(hr) * 2.0D))]]
                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -4527,10 +4505,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [UDFToString((UDFToDouble(hr) * 2.0D))]
+                            Target Columns: [Map 1 -> [hr:string (UDFToString((UDFToDouble(hr) * 2.0D)))]]
                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -5272,10 +5248,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
                       Select Operator
                         expressions: _col2 (type: string)
                         outputColumnNames: _col0
@@ -5297,10 +5271,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [hr]
+                            Target Columns: [Map 1 -> [hr:string (hr)]]
                            Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -5570,10 +5542,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -5825,10 +5795,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [4:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 4 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 4]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -6079,10 +6047,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -6335,10 +6301,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -6400,10 +6364,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [hr]
+                            Target Columns: [Map 1 -> [hr:string (hr)]]
                            Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -7160,10 +7122,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                        Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                        Spark Partition Pruning Sink Operator
-                          Target column: [1:ds (string)]
-                          partition key expr: [ds]
+                          Target Columns: [Map 1 -> [ds:string (ds)]]
                          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 1]
         Reducer 9 
             Execution mode: vectorized
             Reduce Vectorization:
@@ -7232,10 +7192,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                        Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                        Spark Partition Pruning Sink Operator
-                          Target column: [1:ds (string)]
-                          partition key expr: [ds]
+                          Target Columns: [Map 1 -> [ds:string (ds)]]
                          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 1]
 
   Stage: Stage-1
     Spark
@@ -7801,10 +7759,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                        Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                        Spark Partition Pruning Sink Operator
-                          Target column: [1:ds (string)]
-                          partition key expr: [ds]
+                          Target Columns: [Map 1 -> [ds:string (ds)]]
                          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 1]
         Reducer 9 
             Execution mode: vectorized
             Reduce Vectorization:
@@ -7873,10 +7829,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                        Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                        Spark Partition Pruning Sink Operator
-                          Target column: [1:ds (string)]
-                          partition key expr: [ds]
+                          Target Columns: [Map 1 -> [ds:string (ds)]]
                          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 1]
 
   Stage: Stage-1
     Spark
@@ -8445,10 +8399,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                        Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                        Spark Partition Pruning Sink Operator
-                          Target column: [1:ds (string)]
-                          partition key expr: [ds]
+                          Target Columns: [Map 1 -> [ds:string (ds)]]
                          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 1]
         Reducer 13 
             Execution mode: vectorized
             Reduce Vectorization:
@@ -8517,10 +8469,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                        Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                        Spark Partition Pruning Sink Operator
-                          Target column: [1:ds (string)]
-                          partition key expr: [ds]
+                          Target Columns: [Map 1 -> [ds:string (ds)]]
                          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 1]
 
   Stage: Stage-1
     Spark
@@ -8964,10 +8914,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -9209,10 +9157,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [day(ds)]
+                            Target Columns: [Map 1 -> [ds:string (day(ds))]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -9447,10 +9393,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -9521,10 +9465,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [hr]
+                            Target Columns: [Map 1 -> [hr:string (hr)]]
                            Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -9786,10 +9728,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
                       Select Operator
                         expressions: _col2 (type: string)
                         outputColumnNames: _col0
@@ -9811,10 +9751,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [hr]
+                            Target Columns: [Map 1 -> [hr:string (hr)]]
                            Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -10054,10 +9992,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -10290,10 +10226,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [UDFToDouble(hr)]
+                            Target Columns: [Map 1 -> [hr:string (UDFToDouble(hr))]]
                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -10526,10 +10460,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [(UDFToDouble(hr) * 2.0D)]
+                            Target Columns: [Map 1 -> [hr:string ((UDFToDouble(hr) * 2.0D))]]
                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -11034,10 +10966,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -11632,10 +11562,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:ds (string)]
-                            partition key expr: [ds]
+                            Target Columns: [Map 1 -> [ds:string (ds)]]
                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -11706,10 +11634,8 @@ STAGE PLANS:
                           outputColumnNames: _col0
                          Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
                          Spark Partition Pruning Sink Operator
-                            Target column: [1:hr (string)]
-                            partition key expr: [hr]
+                            Target Columns: [Map 1 -> [hr:string (hr)]]
                            Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
-                            target works: [Map 1]
             Execution mode: vectorized
             Map Vectorization:
                 enabled: true
@@ -12365,10 +12291,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                        Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                        Spark Partition Pruning Sink Operator
-                          Target column: [1:ds (string)]
-                          partition key expr: [ds]
+                          Target Columns: [Map 1 -> [ds:string (ds)]]
                          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 1]
         Reducer 9 
             Execution mode: vectorized
             Reduce Vectorization:
@@ -12437,10 +12361,8 @@ STAGE PLANS:
                         outputColumnNames: _col0
                        Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
                        Spark Partition Pruning Sink Operator
-                          Target column: [1:ds (string)]
-                          partition key expr: [ds]
+                          Target Columns: [Map 1 -> [ds:string (ds)]]
                          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
-                          target works: [Map 1]
 
   Stage: Stage-1
     Spark

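
When regenerating golden files like these, one quick sanity check is that
every pruning sink prints the consolidated entry and no legacy entries
survive. A minimal sketch, assuming a plain regex scan is acceptable (the
patterns and class name below are illustrative, not part of the Hive test
harness):

import java.util.regex.Pattern;

public class TargetColumnsCheck {

  // Accepts the consolidated form, e.g.
  //   Target Columns: [Map 1 -> [ds:string (day(ds))]]
  // The expression part is matched loosely since it can be an arbitrary
  // Hive expression such as abs(((- UDFToLong(...)) + 10)).
  static final Pattern NEW_FORM = Pattern.compile(
      "Target Columns: \\[Map \\d+ -> \\[\\w+:\\w+ \\(.+\\)\\]"
          + "(, Map \\d+ -> \\[\\w+:\\w+ \\(.+\\)\\])*\\]");

  // Flags any leftover legacy entries.
  static final Pattern OLD_FORM =
      Pattern.compile("Target column:|partition key expr:|target works:");

  public static void main(String[] args) {
    String line = "Target Columns: [Map 1 -> [ds:string (day(ds))]]";
    System.out.println(NEW_FORM.matcher(line).matches()); // true
    System.out.println(OLD_FORM.matcher(line).find());    // false
  }
}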