http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
index eb61044..bd9b852 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
@@ -66,21 +66,105 @@ POSTHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_table
 #### A masked pattern was here ####
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, st.*
 from sorted_mod_4 s
 left outer join small_table st
 on s.ctinyint = st.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, st.*
 from sorted_mod_4 s
 left outer join small_table st
 on s.ctinyint = st.ctinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: st
+                  Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.*, st.*
 from sorted_mod_4 s
 left outer join small_table st
@@ -100,21 +184,111 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6876
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.cmodint = 2
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.cmodint = 2
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {(_col1 = 2)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col1 = 2)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -134,21 +308,111 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6058
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and pmod(s.ctinyint, 4) = s.cmodint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {((UDFToInteger(_col0) pmod 4) = _col1)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {((UDFToInteger(_col0) pmod 4) = _col1)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -168,21 +432,111 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6248
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.ctinyint < 100
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
 on s.ctinyint = sm.ctinyint and s.ctinyint < 100
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {(_col0 < 100)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col0 < 100)}
+                        1 
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.ctinyint, s.cmodint, sm.cbigint 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -202,7 +556,7 @@ POSTHOOK: Input: default@small_table
 POSTHOOK: Input: default@sorted_mod_4
 #### A masked pattern was here ####
 6876
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -211,7 +565,7 @@ left outer join sorted_mod_4 s2
   on s2.ctinyint = s.ctinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -220,7 +574,117 @@ left outer join sorted_mod_4 s2
   on s2.ctinyint = s.ctinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cbigint (type: bigint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 380 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 UDFToLong(_col1) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: s2
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: tinyint)
+                        1 _col0 (type: tinyint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ctinyint (type: tinyint), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2027 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 UDFToLong(_col1) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 2229 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col0 (type: tinyint)
+                          1 _col0 (type: tinyint)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 7329 Data size: 2451 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.*, sm.*, s2.* 
 from sorted_mod_4 s
 left outer join small_table sm
@@ -306,21 +770,105 @@ POSTHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS FOR COLUMNS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, st.*
 from mod_8_mod_4 s
 left outer join small_table2 st
 on s.cmodtinyint = st.cmodtinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, st.*
 from mod_8_mod_4 s
 left outer join small_table2 st
 on s.cmodtinyint = st.cmodtinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: st
+                  Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 3032 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.*, st.*
 from mod_8_mod_4 s
 left outer join small_table2 st
@@ -340,21 +888,111 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 39112
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodint = 2
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {(_col1 = 2)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col1 = 2)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 3032 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -374,21 +1012,111 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 11171
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and pmod(s.cmodtinyint, 4) = s.cmodint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and pmod(s.cmodtinyint, 4) = s.cmodint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {((_col0 pmod 4) = _col1)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {((_col0 pmod 4) = _col1)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 3032 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -408,21 +1136,111 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 14371
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodtinyint < 3
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
 on s.cmodtinyint = sm.cmodtinyint and s.cmodtinyint < 3
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
 #### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      filter predicates:
+                        0 {(_col0 < 3)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      filter predicates:
+                        0 {(_col0 < 3)}
+                        1 
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 3032 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.cmodtinyint, s.cmodint, sm.cbigint 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -442,7 +1260,7 @@ POSTHOOK: Input: default@mod_8_mod_4
 POSTHOOK: Input: default@small_table2
 #### A masked pattern was here ####
 17792
-PREHOOK: query: explain vectorization detail formatted
+PREHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -451,7 +1269,7 @@ left outer join mod_8_mod_4 s2
   on s2.cmodtinyint = s.cmodtinyint
 ) t1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail formatted
+POSTHOOK: query: explain
 select count(*) from (select s.*, sm.*, s2.* 
 from mod_8_mod_4 s
 left outer join small_table2 sm
@@ -460,7 +1278,117 @@ left outer join mod_8_mod_4 s2
   on s2.cmodtinyint = s.cmodtinyint
 ) t1
 POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: sm
+                  Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cbigint (type: bigint)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 100 Data size: 363 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 UDFToLong(_col1) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: s2
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Spark HashTable Sink Operator
+                      keys:
+                        0 _col0 (type: int)
+                        1 _col0 (type: int)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 1)
 #### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: s
+                  Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cmodtinyint (type: int), cmodint (type: int)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 6058 Data size: 2757 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Outer Join0 to 1
+                      keys:
+                        0 UDFToLong(_col1) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6663 Data size: 3032 Basic stats: COMPLETE Column stats: NONE
+                      Map Join Operator
+                        condition map:
+                             Left Outer Join0 to 1
+                        keys:
+                          0 _col0 (type: int)
+                          1 _col0 (type: int)
+                        input vertices:
+                          1 Map 4
+                        Statistics: Num rows: 7329 Data size: 3335 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator
+                          aggregations: count()
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col0 (type: bigint)
+            Execution mode: vectorized
+            Local Work:
+              Map Reduce Local Work
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: select count(*) from (select s.*, sm.*, s2.* 
 from mod_8_mod_4 s
 left outer join small_table2 sm

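A note on the join semantics exercised above: in every plan, the non-equi terms of the ON clause (for example {(_col1 = 2)} or {(_col0 < 100)}) appear as "filter predicates" on input 0 of the Spark HashTable Sink and Map Join operators rather than as a separate Filter. For a LEFT OUTER JOIN this placement preserves the left rows: a row that fails the filter merely loses its match and is still emitted NULL-extended, which is why the counts above (6876, 6058, 6248) never fall below the left table's cardinality. A minimal sketch of the contrast against the same test tables; the WHERE variant is hypothetical and not part of this patch:

    -- ON-clause residual: kept as a map-join filter predicate; rows of s
    -- with cmodint <> 2 still appear, NULL-extended on the small_table side.
    select count(*) from sorted_mod_4 s
    left outer join small_table sm
      on s.ctinyint = sm.ctinyint and s.cmodint = 2;

    -- WHERE-clause variant: the predicate is applied after the join and
    -- discards every row with cmodint <> 2, so the count can only shrink.
    select count(*) from sorted_mod_4 s
    left outer join small_table sm
      on s.ctinyint = sm.ctinyint
    where s.cmodint = 2;
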
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
index 5bc0f6e..5497426 100644
--- a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out
@@ -97,77 +97,32 @@ POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:s
 POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
 POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT s AS `string`,
+PREHOOK: query: EXPLAIN SELECT s AS `string`,
        CONCAT(CONCAT('      ',s),'      ') AS `none_padded_str`,
       CONCAT(CONCAT('|',RTRIM(CONCAT(CONCAT('      ',s),'      '))),'|') AS `none_z_rtrim_str`
        FROM over1korc LIMIT 20
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION SELECT s AS `string`,
+POSTHOOK: query: EXPLAIN SELECT s AS `string`,
        CONCAT(CONCAT('      ',s),'      ') AS `none_padded_str`,
       CONCAT(CONCAT('|',RTRIM(CONCAT(CONCAT('      ',s),'      '))),'|') AS `none_z_rtrim_str`
        FROM over1korc LIMIT 20
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: over1korc
-                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
-                  Select Operator
-                    expressions: s (type: string), concat(concat('      ', s), '      ') (type: string), concat(concat('|', rtrim(concat(concat('      ', s), '      '))), '|') (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [7, 12, 11]
-                        selectExpressions: StringGroupColConcatStringScalar(col 11, val       )(children: StringScalarConcatStringGroupCol(val       , col 7) -> 11:String_Family) -> 12:String_Family, StringGroupColConcatStringScalar(col 13, val |)(children: StringScalarConcatStringGroupCol(val |, col 11)(children: StringRTrim(col 13)(children: StringGroupColConcatStringScalar(col 11, val       )(children: StringScalarConcatStringGroupCol(val       , col 7) -> 11:String_Family) -> 13:String_Family) -> 11:String) -> 13:String_Family) -> 11:String_Family
-                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                    Limit
-                      Number of rows: 20
-                      Limit Vectorization:
-                          className: VectorLimitOperator
-                          native: true
-                      Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        File Sink Vectorization:
-                            className: VectorFileSinkOperator
-                            native: false
-                        Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-
   Stage: Stage-0
     Fetch Operator
       limit: 20
       Processor Tree:
-        ListSink
+        TableScan
+          alias: over1korc
+          Select Operator
+            expressions: s (type: string), concat(concat('      ', s), '      ') (type: string), concat(concat('|', rtrim(concat(concat('      ', s), '      '))), '|') (type: string)
+            outputColumnNames: _col0, _col1, _col2
+            Limit
+              Number of rows: 20
+              ListSink
 
 PREHOOK: query: SELECT s AS `string`,
        CONCAT(CONCAT('      ',s),'      ') AS `none_padded_str`,
@@ -310,24 +265,20 @@ POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchem
 POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+PREHOOK: query: EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
     ORDER BY `field`
     LIMIT 50
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN VECTORIZATION EXPRESSION
+POSTHOOK: query: EXPLAIN
 SELECT CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING)) AS `field`
     FROM vectortab2korc 
     GROUP BY CONCAT(CONCAT(CONCAT('Quarter ',CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),'-'),CAST(YEAR(dt) AS STRING))
     ORDER BY `field`
     LIMIT 50
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -345,25 +296,11 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                   Select Operator
                    expressions: concat(concat(concat('Quarter ', UDFToString(UDFToInteger(((UDFToDouble((month(dt) - 1)) / 3.0) + 1.0)))), '-'), UDFToString(year(dt))) (type: string)
                     outputColumnNames: _col0
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [19]
-                        selectExpressions: StringGroupConcatColCol(col 17, col 18)(children: StringGroupColConcatStringScalar(col 18, val -)(children: StringScalarConcatStringGroupCol(val Quarter , col 17)(children: CastLongToString(col 13)(children: CastDoubleToLong(col 15)(children: DoubleColAddDoubleScalar(col 16, val 1.0)(children: DoubleColDivideDoubleScalar(col 15, val 3.0)(children: CastLongToDouble(col 14)(children: LongColSubtractLongScalar(col 13, val 1)(children: VectorUDFMonthDate(col 12, field MONTH) -> 13:long) -> 14:long) -> 15:double) -> 16:double) -> 15:double) -> 13:long) -> 17:String) -> 18:String_Family) -> 17:String_Family, CastLongToString(col 13)(children: VectorUDFYearDate(col 12, field YEAR) -> 13:long) -> 18:String) -> 19:String_Family
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      Group By Vectorization:
-                          className: VectorGroupByOperator
-                          vectorOutput: true
-                          keyExpressions: col 19
-                          native: false
-                          projectedOutputColumns: []
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0
@@ -372,39 +309,13 @@ STAGE PLANS:
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Reduce Sink Vectorization:
-                            className: VectorReduceSinkOperator
-                            native: false
-                            nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS 
true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, 
LazyBinarySerDe for values IS true
-                            nativeConditionsNotMet: No TopN IS false, Uniform 
Hash IS false
                         Statistics: Num rows: 2000 Data size: 918712 Basic 
stats: COMPLETE Column stats: NONE
                         TopN Hash Memory Usage: 0.1
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
         Reducer 2 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    vectorOutput: true
-                    keyExpressions: col 0
-                    native: false
-                    projectedOutputColumns: []
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
@@ -412,42 +323,20 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
spark IN [tez, spark] IS true, Not ACID UPDATE or DELETE IS true, No buckets IS 
true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, 
LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: No TopN IS false, Uniform Hash 
IS false
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: 
COMPLETE Column stats: NONE
                   TopN Hash Memory Usage: 0.1
         Reducer 3 
             Execution mode: vectorized
-            Reduce Vectorization:
-                enabled: true
-                enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine spark IN [tez, spark] IS true
-                groupByVectorOutput: true
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0
-                Select Vectorization:
-                    className: VectorSelectOperator
-                    native: true
-                    projectedOutputColumns: [0]
                 Statistics: Num rows: 1000 Data size: 459356 Basic stats: 
COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 50
-                  Limit Vectorization:
-                      className: VectorLimitOperator
-                      native: true
                   Statistics: Num rows: 50 Data size: 22950 Basic stats: 
COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    File Sink Vectorization:
-                        className: VectorFileSinkOperator
-                        native: false
                     Statistics: Num rows: 50 Data size: 22950 Basic stats: 
COMPLETE Column stats: NONE
                     table:
                         input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat

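Aside: the hunks above strip the vectorization annotations around the
quarter-label expression, but the underlying arithmetic is unchanged: the
grouping key maps MONTH(dt) onto a quarter number. A minimal standalone
HiveQL sketch of that expression follows (same table and column names as the
test; the literal date in the comment is illustrative only, not part of the
golden file):

    -- For dt = DATE '2017-08-15': MONTH(dt) = 8, (8 - 1) / 3 = 2.33...,
    -- + 1 = 3.33..., and CAST(... AS INT) truncates to 3, so the label
    -- becomes 'Quarter 3-2017'.
    SELECT CONCAT(
             CONCAT(
               CONCAT('Quarter ',
                      CAST(CAST((MONTH(dt) - 1) / 3 + 1 AS INT) AS STRING)),
               '-'),
             CAST(YEAR(dt) AS STRING)) AS `field`
    FROM vectortab2korc
    LIMIT 1;

Months 1-3 yield (0..2)/3 + 1 in [1, 1.67), hence quarter 1, and months 10-12
yield quarter 4, because Hive's / is floating-point division and the inner
CAST AS INT truncates toward zero.
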
http://git-wip-us.apache.org/repos/asf/hive/blob/ad6ce078/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out 
b/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
index 1c8e479..9a46ee1 100644
--- a/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_varchar_4.q.out
@@ -121,16 +121,12 @@ POSTHOOK: query: create table 
varchar_lazy_binary_columnar(vt varchar(10), vsi v
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_lazy_binary_columnar
-PREHOOK: query: explain vectorization expression
+PREHOOK: query: explain
 insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, 
s from vectortab2korc
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization expression
+POSTHOOK: query: explain
 insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, 
s from vectortab2korc
 POSTHOOK: type: QUERY
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -146,23 +142,12 @@ STAGE PLANS:
                 TableScan
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: 
COMPLETE Column stats: NONE
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12]
                   Select Operator
                     expressions: CAST( t AS varchar(10)) (type: varchar(10)), 
CAST( si AS varchar(10)) (type: varchar(10)), CAST( i AS varchar(20)) (type: 
varchar(20)), CAST( b AS varchar(30)) (type: varchar(30)), CAST( f AS 
varchar(20)) (type: varchar(20)), CAST( d AS varchar(20)) (type: varchar(20)), 
CAST( s AS varchar(50)) (type: varchar(50))
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [13, 14, 15, 16, 17, 18, 19]
-                        selectExpressions: CastLongToVarChar(col 0, maxLength 
10) -> 13:VarChar, CastLongToVarChar(col 1, maxLength 10) -> 14:VarChar, 
CastLongToVarChar(col 2, maxLength 20) -> 15:VarChar, CastLongToVarChar(col 3, 
maxLength 30) -> 16:VarChar, VectorUDFAdaptor(CAST( f AS varchar(20))) -> 
17:varchar(20), VectorUDFAdaptor(CAST( d AS varchar(20))) -> 18:varchar(20), 
CastStringGroupToVarChar(col 8, maxLength 50) -> 19:VarChar
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: 
COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
                       Statistics: Num rows: 2000 Data size: 918712 Basic 
stats: COMPLETE Column stats: NONE
                       table:
                           input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
@@ -170,14 +155,6 @@ STAGE PLANS:
                           serde: 
org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe
                           name: default.varchar_lazy_binary_columnar
             Execution mode: vectorized
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
-                groupByVectorOutput: true
-                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: true
-                vectorized: true
 
   Stage: Stage-0
     Move Operator

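For reference, both files exercise the two forms of EXPLAIN visible in the
hunks: plain EXPLAIN prints only the operator tree, while the EXPLAIN
VECTORIZATION variants add the per-operator annotations shown on the removed
'-' lines above (Select Vectorization, Map Vectorization, selectExpressions,
and so on). A minimal sketch against the same test tables (exact annotation
text varies by Hive version):

    -- Plain plan, matching the updated expected output:
    EXPLAIN
    insert overwrite table varchar_lazy_binary_columnar
    select t, si, i, b, f, d, s from vectortab2korc;

    -- Annotated plan, the form the old golden files expected:
    EXPLAIN VECTORIZATION EXPRESSION
    insert overwrite table varchar_lazy_binary_columnar
    select t, si, i, b, f, d, s from vectortab2korc;
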