http://git-wip-us.apache.org/repos/asf/hive/blob/16d28b34/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
index 0a01b8c..1511298 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_complex.q.out
@@ -149,25 +149,55 @@ POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).b SIMPL
 POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).insert_num SIMPLE [(complex_struct1_c_txt)complex_struct1_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).s1 SIMPLE [(complex_struct1_c_txt)complex_struct1_c_txt.FieldSchema(name:s1, type:struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>, comment:null), ]
 complex_struct1_c_txt.insert_num       complex_struct1_c_txt.s1        complex_struct1_c_txt.b
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,part,s1,b from part_change_various_various_struct1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,part,s1,b from part_change_various_various_struct1
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=6 width=4)
-          Output:["_col0","_col1","_col2","_col3"]
-          TableScan [TS_0] (rows=6 width=789)
-            
default@part_change_various_various_struct1,part_change_various_various_struct1,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","s1","b"]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_change_various_various_struct1
+                  Statistics: Num rows: 6 Data size: 4734 Basic stats: 
COMPLETE Column stats: PARTIAL
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), s1 
(type: 
struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>),
 b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                notVectorizedReason: Select expression for SELECT operator: 
Data type 
struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>
 of Column[s1] not supported
+                vectorized: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,part,s1,b from 
part_change_various_various_struct1
 PREHOOK: type: QUERY
@@ -413,25 +443,55 @@ POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).b SIMPLE [
 POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).insert_num SIMPLE [(complex_struct2_d_txt)complex_struct2_d_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).s2 SIMPLE [(complex_struct2_d_txt)complex_struct2_d_txt.FieldSchema(name:s2, type:struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>, comment:null), ]
 complex_struct2_d_txt.insert_num       complex_struct2_d_txt.b complex_struct2_d_txt.s2
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,part,b,s2 from part_add_various_various_struct2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,part,b,s2 from part_add_various_various_struct2
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=8 width=4)
-          Output:["_col0","_col1","_col2","_col3"]
-          TableScan [TS_0] (rows=8 width=614)
-            
default@part_add_various_various_struct2,part_add_various_various_struct2,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","b","s2"]
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_add_various_various_struct2
+                  Statistics: Num rows: 8 Data size: 4912 Basic stats: 
COMPLETE Column stats: PARTIAL
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), b 
(type: string), s2 (type: 
struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>)
+                    outputColumnNames: _col0, _col1, _col2, _col3
+                    Statistics: Num rows: 8 Data size: 32 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 8 Data size: 32 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                notVectorizedReason: Select expression for SELECT operator: 
Data type 
struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>
 of Column[s2] not supported
+                vectorized: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,part,b,s2 from 
part_add_various_various_struct2
 PREHOOK: type: QUERY
@@ -601,25 +661,55 @@ POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).b SIMPL
 POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).insert_num SIMPLE [(complex_struct4_c_txt)complex_struct4_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).s3 SIMPLE [(complex_struct4_c_txt)complex_struct4_c_txt.FieldSchema(name:s3, type:struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>, comment:null), ]
 complex_struct4_c_txt.insert_num       complex_struct4_c_txt.b complex_struct4_c_txt.s3
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,part,b,s3 from part_add_to_various_various_struct4
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,part,b,s3 from part_add_to_various_various_struct4
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_add_to_various_various_struct4
+                  Statistics: Num rows: 4 Data size: 1172 Basic stats: 
COMPLETE Column stats: PARTIAL
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), b 
(type: string), s3 (type: 
struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>)
+                    outputColumnNames: _col0, _col1, _col2, _col3
+                    Statistics: Num rows: 4 Data size: 16 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 4 Data size: 16 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                notVectorizedReason: Select expression for SELECT operator: 
Data type 
struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>
 of Column[s3] not supported
+                vectorized: false
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 llap
-      File Output Operator [FS_2]
-        Select Operator [SEL_1] (rows=4 width=4)
-          Output:["_col0","_col1","_col2","_col3"]
-          TableScan [TS_0] (rows=4 width=293)
-            
default@part_add_to_various_various_struct4,part_add_to_various_various_struct4,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","b","s3"]
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,part,b,s3 from 
part_add_to_various_various_struct4
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/16d28b34/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
index d240f82..c89be06 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_part_all_primitive.q.out
@@ -278,25 +278,73 @@ POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=1).c9 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:boolean1, type:boolean, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num     boolean1        boolean1        boolean1        boolean1        boolean1        boolean1        boolean1        boolean1        boolean1        tinyint1        tinyint1        tinyint1        tinyint1        tinyint1        tinyint1        tinyint1        tinyint1        tinyint1        tinyint1        tinyint1        smallint1       smallint1       smallint1       smallint1       smallint1       smallint1       smallint1       smallint1       smallint1       smallint1       smallint1       int1    int1    int1    int1    int1    int1    int1    int1    int1    int1    int1    bigint1 bigint1 bigint1 bigint1 bigint1 bigint1 bigint1 bigint1 bigint1 bigint1 bigint1 _c54
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b
 from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b
 from part_change_various_various_boolean_to_bigint
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 vectorized, llap
-      File Output Operator [FS_4]
-        Select Operator [SEL_3] (rows=10 width=4)
-          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43","_col44","_col45","_col46","_col47","_col48","_col49","_col50","_col51","_col52","_col53","_col54","_col55"]
-          TableScan [TS_0] (rows=10 width=1168)
-            
default@part_change_various_various_boolean_to_bigint,part_change_various_various_boolean_to_bigint,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","c34","c35","c36","c37","c38","c39","c40","c41","c42","c43","c44","c45","c46","c47","c48","c49","c50","c51","c52","c53","b"]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_change_various_various_boolean_to_bigint
+                  Statistics: Num rows: 10 Data size: 11688 Basic stats: 
COMPLETE Column stats: PARTIAL
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 
50, 51, 52, 53, 54, 55]
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), c1 
(type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5 
(type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9 
(type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint), 
c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type: 
tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20 
(type: tinyint), c21 (type: smallint), c22 (type: smallint), c23 (type: 
smallint), c24 (type: smallint), c25 (type: smallint), c26 (type: smallint), 
c27 (type: smallint), c28 (type: smallint), c29 (type: smallint), c30 (type: 
smallint), c31 (type: smallint), c32 (type: int), c33 (type: int), c34 (type: 
int), c35 (type: int), c36 (type: int), c37 (type: int), c38 (type: int), c39 
(type: int), c40 (type: int), c41 (type: int), c42 (type: int), c43 (type: 
bigint), c44 (type: bigint), c45 (type: bigint), c46 (type: bigint), c4
 7 (type: bigint), c48 (type: bigint), c49 (type: bigint), c50 (type: bigint), 
c51 (type: bigint), c52 (type: bigint), c53 (type: bigint), b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, 
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, 
_col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, 
_col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, 
_col55
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 55, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 
28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 
48, 49, 50, 51, 52, 53, 54]
+                    Statistics: Num rows: 10 Data size: 40 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 10 Data size: 40 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 55
+                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 
53, 54]
+                    dataColumns: insert_num:int, c1:boolean, c2:boolean, 
c3:boolean, c4:boolean, c5:boolean, c6:boolean, c7:boolean, c8:boolean, 
c9:boolean, c10:tinyint, c11:tinyint, c12:tinyint, c13:tinyint, c14:tinyint, 
c15:tinyint, c16:tinyint, c17:tinyint, c18:tinyint, c19:tinyint, c20:tinyint, 
c21:smallint, c22:smallint, c23:smallint, c24:smallint, c25:smallint, 
c26:smallint, c27:smallint, c28:smallint, c29:smallint, c30:smallint, 
c31:smallint, c32:int, c33:int, c34:int, c35:int, c36:int, c37:int, c38:int, 
c39:int, c40:int, c41:int, c42:int, c43:bigint, c44:bigint, c45:bigint, 
c46:bigint, c47:bigint, c48:bigint, c49:bigint, c50:bigint, c51:bigint, 
c52:bigint, c53:bigint, b:string
+                    partitionColumnCount: 1
+                    partitionColumns: part:int
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b
 from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
@@ -493,25 +541,73 @@ POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:decimal1, type:decimal(38,18), comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num     decimal1        decimal1        decimal1        decimal1        decimal1        decimal1        decimal1        decimal1        decimal1        decimal1        decimal1        float1  float1  float1  float1  float1  float1  float1  float1  float1  float1  float1  double1 double1 double1 double1 double1 double1 double1 double1 double1 double1 double1 _c34
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b
 from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b
 from part_change_various_various_decimal_to_double
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 vectorized, llap
-      File Output Operator [FS_4]
-        Select Operator [SEL_3] (rows=6 width=4)
-          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35"]
-          TableScan [TS_0] (rows=6 width=1382)
-            
default@part_change_various_various_decimal_to_double,part_change_various_various_decimal_to_double,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","b"]
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_change_various_various_decimal_to_double
+                  Statistics: Num rows: 6 Data size: 8295 Basic stats: 
COMPLETE Column stats: PARTIAL
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 
30, 31, 32, 33, 34, 35]
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), c1 
(type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)), 
c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type: 
decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9 
(type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)), 
c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16 
(type: float), c17 (type: float), c18 (type: float), c19 (type: float), c20 
(type: float), c21 (type: float), c22 (type: float), c23 (type: double), c24 
(type: double), c25 (type: double), c26 (type: double), c27 (type: double), c28 
(type: double), c29 (type: double), c30 (type: double), c31 (type: double), c32 
(type: double), c33 (type: double), b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, 
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, 
_col35
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 35, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 
28, 29, 30, 31, 32, 33, 34]
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 35
+                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 
33, 34]
+                    dataColumns: insert_num:int, c1:decimal(38,18), 
c2:decimal(38,18), c3:decimal(38,18), c4:decimal(38,18), c5:decimal(38,18), 
c6:decimal(38,18), c7:decimal(38,18), c8:decimal(38,18), c9:decimal(38,18), 
c10:decimal(38,18), c11:decimal(38,18), c12:float, c13:float, c14:float, 
c15:float, c16:float, c17:float, c18:float, c19:float, c20:float, c21:float, 
c22:float, c23:double, c24:double, c25:double, c26:double, c27:double, 
c28:double, c29:double, c30:double, c31:double, c32:double, c33:double, b:string
+                    partitionColumnCount: 1
+                    partitionColumns: part:int
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b
 from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
@@ -624,25 +720,73 @@ POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).c8 SI
 POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).c9 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:timestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num     timestamp1      timestamp1      timestamp1      timestamp1      timestamp1      timestamp1      timestamp1      timestamp1      timestamp1      timestamp1      timestamp1      timestamp1      _c13
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from 
part_change_various_various_timestamp
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from 
part_change_various_various_timestamp
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 vectorized, llap
-      File Output Operator [FS_4]
-        Select Operator [SEL_3] (rows=6 width=4)
-          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
-          TableScan [TS_0] (rows=6 width=494)
-            
default@part_change_various_various_timestamp,part_change_various_various_timestamp,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","b"]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_change_various_various_timestamp
+                  Statistics: Num rows: 6 Data size: 2965 Basic stats: 
COMPLETE Column stats: PARTIAL
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14]
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), c1 
(type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type: 
timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp), 
c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type: 
timestamp), c12 (type: timestamp), b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 14, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13]
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 14
+                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13]
+                    dataColumns: insert_num:int, c1:timestamp, c2:timestamp, 
c3:timestamp, c4:timestamp, c5:timestamp, c6:timestamp, c7:timestamp, 
c8:timestamp, c9:timestamp, c10:timestamp, c11:timestamp, c12:timestamp, 
b:string
+                    partitionColumnCount: 1
+                    partitionColumns: part:int
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from 
part_change_various_various_timestamp
 PREHOOK: type: QUERY
@@ -739,25 +883,73 @@ POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).c3 SIMPLE
 POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).c4 SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:date1, type:date, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num     date1   date1   date1   date1   _c5
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_change_various_various_date
+                  Statistics: Num rows: 6 Data size: 2444 Basic stats: 
COMPLETE Column stats: PARTIAL
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), c1 
(type: date), c2 (type: date), c3 (type: date), c4 (type: date), b (type: 
string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 6, 1, 2, 3, 4, 5]
+                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 6
+                    includeColumns: [0, 1, 2, 3, 4, 5]
+                    dataColumns: insert_num:int, c1:date, c2:date, c3:date, 
c4:date, b:string
+                    partitionColumnCount: 1
+                    partitionColumns: part:int
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 vectorized, llap
-      File Output Operator [FS_4]
-        Select Operator [SEL_3] (rows=6 width=4)
-          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-          TableScan [TS_0] (rows=6 width=407)
-            
default@part_change_various_various_date,part_change_various_various_date,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","b"]
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,b from 
part_change_various_various_date
 PREHOOK: type: QUERY
@@ -935,25 +1127,73 @@ POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).c5 S
 POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).c6 SIMPLE [(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:c6, type:decimal(25,15), comment:null), ]
 POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).insert_num SIMPLE [(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 same_type1_c_txt.insert_num    same_type1_c_txt.c1     same_type1_c_txt.c2     same_type1_c_txt.c3     same_type1_c_txt.c4     same_type1_c_txt.c5     same_type1_c_txt.c6     same_type1_c_txt.b
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from 
part_change_same_type_different_params
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from 
part_change_same_type_different_params
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part_change_same_type_different_params
+                  Statistics: Num rows: 13 Data size: 8736 Basic stats: 
COMPLETE Column stats: PARTIAL
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
+                  Select Operator
+                    expressions: insert_num (type: int), part (type: int), c1 
(type: char(8)), c2 (type: char(32)), c3 (type: varchar(15)), c4 (type: 
varchar(18)), c5 (type: decimal(10,2)), c6 (type: decimal(25,15)), b (type: 
string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 8, 1, 2, 3, 4, 5, 6, 7]
+                    Statistics: Num rows: 13 Data size: 52 Basic stats: 
COMPLETE Column stats: PARTIAL
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 13 Data size: 52 Basic stats: 
COMPLETE Column stats: PARTIAL
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 8
+                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7]
+                    dataColumns: insert_num:int, c1:char(8), c2:char(32), 
c3:varchar(15), c4:varchar(18), c5:decimal(10,2), c6:decimal(25,15), b:string
+                    partitionColumnCount: 1
+                    partitionColumns: part:int
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 vectorized, llap
-      File Output Operator [FS_4]
-        Select Operator [SEL_3] (rows=13 width=4)
-          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
-          TableScan [TS_0] (rows=13 width=672)
-            
default@part_change_same_type_different_params,part_change_same_type_different_params,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","b"]
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,b from 
part_change_same_type_different_params
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/16d28b34/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
index 5e99743..5ef34e5 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_orc_vec_table.q.out
@@ -83,25 +83,72 @@ POSTHOOK: Lineage: table_add_int_permute_select.b SIMPLE [(values__tmp__table__1
 POSTHOOK: Lineage: table_add_int_permute_select.c EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: table_add_int_permute_select.insert_num EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,a,b from table_add_int_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,a,b from table_add_int_permute_select
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 vectorized, llap
-      File Output Operator [FS_4]
-        Select Operator [SEL_3] (rows=5 width=99)
-          Output:["_col0","_col1","_col2"]
-          TableScan [TS_0] (rows=5 width=99)
-            
default@table_add_int_permute_select,table_add_int_permute_select,Tbl:COMPLETE,Col:NONE,Output:["insert_num","a","b"]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: table_add_int_permute_select
+                  Statistics: Num rows: 5 Data size: 496 Basic stats: COMPLETE 
Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3]
+                  Select Operator
+                    expressions: insert_num (type: int), a (type: int), b 
(type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 1, 2]
+                    Statistics: Num rows: 5 Data size: 496 Basic stats: 
COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 5 Data size: 496 Basic stats: 
COMPLETE Column stats: NONE
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    includeColumns: [0, 1, 2]
+                    dataColumns: insert_num:int, a:int, b:string, c:int
+                    partitionColumnCount: 0
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting 
works right
 select insert_num,a,b from table_add_int_permute_select
@@ -208,25 +255,72 @@ POSTHOOK: Lineage: table_add_int_string_permute_select.c EXPRESSION [(values__tm
 POSTHOOK: Lineage: table_add_int_string_permute_select.d SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
 POSTHOOK: Lineage: table_add_int_string_permute_select.insert_num EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,a,b from table_add_int_string_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,a,b from table_add_int_string_permute_select
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 vectorized, llap
-      File Output Operator [FS_4]
-        Select Operator [SEL_3] (rows=5 width=99)
-          Output:["_col0","_col1","_col2"]
-          TableScan [TS_0] (rows=5 width=99)
-            
default@table_add_int_string_permute_select,table_add_int_string_permute_select,Tbl:COMPLETE,Col:NONE,Output:["insert_num","a","b"]
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: table_add_int_string_permute_select
+                  Statistics: Num rows: 5 Data size: 496 Basic stats: COMPLETE 
Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4]
+                  Select Operator
+                    expressions: insert_num (type: int), a (type: int), b 
(type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 1, 2]
+                    Statistics: Num rows: 5 Data size: 496 Basic stats: 
COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 5 Data size: 496 Basic stats: 
COMPLETE Column stats: NONE
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 5
+                    includeColumns: [0, 1, 2]
+                    dataColumns: insert_num:int, a:int, b:string, c:int, 
d:string
+                    partitionColumnCount: 0
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting 
works right
 select insert_num,a,b from table_add_int_string_permute_select
@@ -403,25 +497,72 @@ POSTHOOK: Lineage: table_change_string_group_double.c2 EXPRESSION [(values__tmp_
 POSTHOOK: Lineage: table_change_string_group_double.c3 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_string_group_double.insert_num EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,c1,c2,c3,b from table_change_string_group_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,c1,c2,c3,b from table_change_string_group_double
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 vectorized, llap
-      File Output Operator [FS_4]
-        Select Operator [SEL_3] (rows=5 width=422)
-          Output:["_col0","_col1","_col2","_col3","_col4"]
-          TableScan [TS_0] (rows=5 width=422)
-            
default@table_change_string_group_double,table_change_string_group_double,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","b"]
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: table_change_string_group_double
+                  Statistics: Num rows: 5 Data size: 2110 Basic stats: 
COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4]
+                  Select Operator
+                    expressions: insert_num (type: int), c1 (type: double), c2 
(type: double), c3 (type: double), b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 1, 2, 3, 4]
+                    Statistics: Num rows: 5 Data size: 2110 Basic stats: 
COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 5 Data size: 2110 Basic stats: 
COMPLETE Column stats: NONE
+                      table:
+                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: 
hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 5
+                    includeColumns: [0, 1, 2, 3, 4]
+                    dataColumns: insert_num:int, c1:double, c2:double, 
c3:double, b:string
+                    partitionColumnCount: 0
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,c1,c2,c3,b from 
table_change_string_group_double
 PREHOOK: type: QUERY
@@ -691,25 +832,72 @@ POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_gro
 POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_group.c9 EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_numeric_group_string_group_multi_ints_string_group.insert_num EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4   _col5   _col6   _col7   _col8   _col9   _col10  _col11  _col12  _col13  _col14  _col15  _col16  _col17  _col18  _col19  _col20  _col21
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select 
insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b
 from table_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select 
insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b
 from table_change_numeric_group_string_group_multi_ints_string_group
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: table_change_numeric_group_string_group_multi_ints_string_group
+                  Statistics: Num rows: 5 Data size: 820 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
+                  Select Operator
+                    expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: string), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(50)), c8 (type: char(50)), c9 (type: char(5)), c10 (type: char(5)), c11 (type: char(5)), c12 (type: char(5)), c13 (type: varchar(50)), c14 (type: varchar(50)), c15 (type: varchar(50)), c16 (type: varchar(50)), c17 (type: varchar(5)), c18 (type: varchar(5)), c19 (type: varchar(5)), c20 (type: varchar(5)), b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
+                    Statistics: Num rows: 5 Data size: 820 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 5 Data size: 820 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 22
+                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
+                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:string, c5:char(50), c6:char(50), c7:char(50), c8:char(50), c9:char(5), c10:char(5), c11:char(5), c12:char(5), c13:varchar(50), c14:varchar(50), c15:varchar(50), c16:varchar(50), c17:varchar(5), c18:varchar(5), c19:varchar(5), c20:varchar(5), b:string
+                    partitionColumnCount: 0
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 vectorized, llap
-      File Output Operator [FS_4]
-        Select Operator [SEL_3] (rows=5 width=164)
-          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21"]
-          TableScan [TS_0] (rows=5 width=164)
-            default@table_change_numeric_group_string_group_multi_ints_string_group,table_change_numeric_group_string_group_multi_ints_string_group,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","b"]
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b from table_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
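(The rowBatchContext in the plan above shows the table after schema evolution, with c1..c20 already carrying string, char and varchar types. A sketch of the kind of DDL that drives such a change follows; the assumption that these columns started out as integer types comes only from the table name, and the authoritative statements live in the corresponding schema_evol_*.q test file.)

    -- hypothetical sketch of the evolution step: numeric columns retyped to the string group
    ALTER TABLE table_change_numeric_group_string_group_multi_ints_string_group CHANGE COLUMN c1 c1 STRING;
    ALTER TABLE table_change_numeric_group_string_group_multi_ints_string_group CHANGE COLUMN c5 c5 CHAR(50);
    ALTER TABLE table_change_numeric_group_string_group_multi_ints_string_group CHANGE COLUMN c13 c13 VARCHAR(50);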
@@ -864,25 +1052,72 @@ POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group
 POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.c9 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: table_change_numeric_group_string_group_floating_string_group.insert_num EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4   _col5   _col6   _col7   _col8   _col9   _col10  _col11  _col12  _col13  _col14  _col15  _col16
-PREHOOK: query: explain
+PREHOOK: query: explain vectorization detail
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: explain vectorization detail
 select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 POSTHOOK: type: QUERY
 Explain
-Plan optimized by CBO.
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: table_change_numeric_group_string_group_floating_string_group
+                  Statistics: Num rows: 5 Data size: 2940 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
+                  Select Operator
+                    expressions: insert_num (type: int), c1 (type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 (type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 (type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: varchar(7)), b (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
+                    Statistics: Num rows: 5 Data size: 2940 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 5 Data size: 2940 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                groupByVectorOutput: true
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 17
+                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
+                    dataColumns: insert_num:int, c1:string, c2:string, c3:string, c4:char(50), c5:char(50), c6:char(50), c7:char(7), c8:char(7), c9:char(7), c10:varchar(50), c11:varchar(50), c12:varchar(50), c13:varchar(7), c14:varchar(7), c15:varchar(7), b:string
+                    partitionColumnCount: 0
 
-Stage-0
-  Fetch Operator
-    limit:-1
-    Stage-1
-      Map 1 vectorized, llap
-      File Output Operator [FS_4]
-        Select Operator [SEL_3] (rows=5 width=588)
-          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16"]
-          TableScan [TS_0] (rows=5 width=588)
-            default@table_change_numeric_group_string_group_floating_string_group,table_change_numeric_group_string_group_floating_string_group,Tbl:COMPLETE,Col:NONE,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","b"]
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
 
 PREHOOK: query: select insert_num,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from table_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
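(The Map Vectorization block above reports enabled only because the scan goes through a vectorizable input format, listed here as OrcInputFormat, with hive.vectorized.use.vectorized.input.format set to true. Below is a minimal sketch of a table that satisfies that condition; the table_change_demo name and its column subset are illustrative and not part of the test.)

    -- hypothetical ORC table so the vectorized input-format condition is met
    CREATE TABLE table_change_demo (insert_num INT, c1 STRING, b STRING) STORED AS ORC;
    SET hive.vectorized.execution.enabled=true;
    EXPLAIN VECTORIZATION DETAIL
    SELECT insert_num, c1, b FROM table_change_demo;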
