http://git-wip-us.apache.org/repos/asf/hive/blob/aed21d0b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out
index 1d4163c..437770d 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part.q.out
@@ -87,73 +87,25 @@ POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).b SIMPLE [(valu
 POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: part_add_int_permute_select PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,a,b from part_add_int_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,a,b from part_add_int_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_int_permute_select
-                  Statistics: Num rows: 2 Data size: 33 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), a 
(type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 4, 1, 2]
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE 
Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 4
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=2 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=2 width=16)
+            
default@part_add_int_permute_select,part_add_int_permute_select,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting 
works right
 select insert_num,part,a,b from part_add_int_permute_select
@@ -254,73 +206,25 @@ POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).c EXPRES
 POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).d SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col5, type:string, comment:), ]
 POSTHOOK: Lineage: part_add_int_string_permute_select PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,a,b from part_add_int_string_permute_select
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,a,b from part_add_int_string_permute_select
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_int_string_permute_select
-                  Statistics: Num rows: 2 Data size: 38 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), a 
(type: int), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2]
-                    Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE 
Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 2 Data size: 8 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2]
-                    dataColumns: insert_num:int, a:int, b:string, c:int, 
d:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=2 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=2 width=19)
+            
default@part_add_int_string_permute_select,part_add_int_string_permute_select,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","a","b"]
 
 PREHOOK: query: -- SELECT permutation columns to make sure NULL defaulting 
works right
 select insert_num,part,a,b from part_add_int_string_permute_select
@@ -483,73 +387,25 @@ POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c2 SIMPLE [
 POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).c3 SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:double1, type:double, comment:null), ]
 POSTHOOK: Lineage: part_change_string_group_double PARTITION(part=1).insert_num SIMPLE [(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, type:int, comment:null), ]
 insert_num     double1 double1 double1 _c4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from part_change_string_group_double
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_string_group_double
-                  Statistics: Num rows: 5 Data size: 284 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: double), c2 (type: double), c3 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2, 3, 4]
-                    Statistics: Num rows: 5 Data size: 20 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 5 Data size: 20 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2, 3, 4]
-                    dataColumns: insert_num:int, c1:double, c2:double, 
c3:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=5 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+          TableScan [TS_0] (rows=5 width=56)
+            
default@part_change_string_group_double,part_change_string_group_double,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from 
part_change_string_group_double
 PREHOOK: type: QUERY
@@ -649,73 +505,25 @@ POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_date_group_string_group_date_timestamp PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4   _col5   _col6   _col7   _col8   _col9   _col10  _col11
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from 
part_change_date_group_string_group_date_timestamp
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from 
part_change_date_group_string_group_date_timestamp
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_date_group_string_group_date_timestamp
-                  Statistics: Num rows: 6 Data size: 926 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: string), c2 (type: char(50)), c3 (type: char(15)), c4 (type: 
varchar(50)), c5 (type: varchar(15)), c6 (type: string), c7 (type: char(50)), 
c8 (type: char(15)), c9 (type: varchar(50)), c10 (type: varchar(15)), b (type: 
string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 12, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: insert_num:int, c1:string, c2:char(50), 
c3:char(15), c4:varchar(50), c5:varchar(15), c6:string, c7:char(50), 
c8:char(15), c9:varchar(50), c10:varchar(15), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"]
+          TableScan [TS_0] (rows=6 width=154)
+            
default@part_change_date_group_string_group_date_timestamp,part_change_date_group_string_group_date_timestamp,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from 
part_change_date_group_string_group_date_timestamp
 PREHOOK: type: QUERY
@@ -892,73 +700,25 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_grou
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_multi_ints_string_group PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4   _col5   _col6   _col7   _col8   _col9   _col10  _col11  _col12  _col13  _col14  _col15  _col16  _col17  _col18  _col19  _col20  _col21
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b
 from part_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b
 from part_change_numeric_group_string_group_multi_ints_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: 
part_change_numeric_group_string_group_multi_ints_string_group
-                  Statistics: Num rows: 6 Data size: 918 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: string), c2 (type: string), c3 (type: string), c4 (type: string), c5 
(type: char(50)), c6 (type: char(50)), c7 (type: char(50)), c8 (type: 
char(50)), c9 (type: char(5)), c10 (type: char(5)), c11 (type: char(5)), c12 
(type: char(5)), c13 (type: varchar(50)), c14 (type: varchar(50)), c15 (type: 
varchar(50)), c16 (type: varchar(50)), c17 (type: varchar(5)), c18 (type: 
varchar(5)), c19 (type: varchar(5)), c20 (type: varchar(5)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 22, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 22
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15, 16, 17, 18, 19, 20, 21]
-                    dataColumns: insert_num:int, c1:string, c2:string, 
c3:string, c4:string, c5:char(50), c6:char(50), c7:char(50), c8:char(50), 
c9:char(5), c10:char(5), c11:char(5), c12:char(5), c13:varchar(50), 
c14:varchar(50), c15:varchar(50), c16:varchar(50), c17:varchar(5), 
c18:varchar(5), c19:varchar(5), c20:varchar(5), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22"]
+          TableScan [TS_0] (rows=6 width=153)
+            
default@part_change_numeric_group_string_group_multi_ints_string_group,part_change_numeric_group_string_group_multi_ints_string_group,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","b"]
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,b
 from part_change_numeric_group_string_group_multi_ints_string_group
 PREHOOK: type: QUERY
@@ -1117,73 +877,25 @@ POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_numeric_group_string_group_floating_string_group PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4   _col5   _col6   _col7   _col8   _col9   _col10  _col11  _col12  _col13  _col14  _col15  _col16
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b 
from part_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b 
from part_change_numeric_group_string_group_floating_string_group
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: 
part_change_numeric_group_string_group_floating_string_group
-                  Statistics: Num rows: 6 Data size: 1386 Basic stats: 
COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, 17]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: string), c2 (type: string), c3 (type: string), c4 (type: char(50)), c5 
(type: char(50)), c6 (type: char(50)), c7 (type: char(7)), c8 (type: char(7)), 
c9 (type: char(7)), c10 (type: varchar(50)), c11 (type: varchar(50)), c12 
(type: varchar(50)), c13 (type: varchar(7)), c14 (type: varchar(7)), c15 (type: 
varchar(7)), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16, _col17
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 17, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 16]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 17
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15, 16]
-                    dataColumns: insert_num:int, c1:string, c2:string, 
c3:string, c4:char(50), c5:char(50), c6:char(50), c7:char(7), c8:char(7), 
c9:char(7), c10:varchar(50), c11:varchar(50), c12:varchar(50), c13:varchar(7), 
c14:varchar(7), c15:varchar(7), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"]
+          TableScan [TS_0] (rows=6 width=231)
+            
default@part_change_numeric_group_string_group_floating_string_group,part_change_numeric_group_string_group_floating_string_group,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","b"]
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,b from 
part_change_numeric_group_string_group_floating_string_group
 PREHOOK: type: QUERY
@@ -1330,73 +1042,25 @@ POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1
 POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_string_group_string_group_string PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4   _col5   _col6   _col7   _col8   _col9   _col10  _col11
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from 
part_change_string_group_string_group_string
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from 
part_change_string_group_string_group_string
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_string_group_string_group_string
-                  Statistics: Num rows: 6 Data size: 421 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: char(50)), c2 (type: char(9)), c3 (type: varchar(50)), c4 (type: 
char(9)), c5 (type: varchar(50)), c6 (type: varchar(9)), c7 (type: string), c8 
(type: char(50)), c9 (type: char(9)), c10 (type: string), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 12, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 12
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
-                    dataColumns: insert_num:int, c1:char(50), c2:char(9), 
c3:varchar(50), c4:char(9), c5:varchar(50), c6:varchar(9), c7:string, 
c8:char(50), c9:char(9), c10:string, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"]
+          TableScan [TS_0] (rows=6 width=70)
+            
default@part_change_string_group_string_group_string,part_change_string_group_string_group_string,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,b from 
part_change_string_group_string_group_string
 PREHOOK: type: QUERY
@@ -1577,73 +1241,25 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint P
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).c9 EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col10, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_tinyint_to_bigint PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4   _col5   _col6   _col7   _col8   _col9   _col10  _col11  _col12  _col13  _col14  _col15  _col16  _col17  _col18  _col19
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b
 from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b
 from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: 
part_change_lower_to_higher_numeric_group_tinyint_to_bigint
-                  Statistics: Num rows: 6 Data size: 860 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: smallint), c2 (type: int), c3 (type: bigint), c4 (type: decimal(38,18)), 
c5 (type: float), c6 (type: double), c7 (type: int), c8 (type: bigint), c9 
(type: decimal(38,18)), c10 (type: float), c11 (type: double), c12 (type: 
bigint), c13 (type: decimal(38,18)), c14 (type: float), c15 (type: double), c16 
(type: decimal(38,18)), c17 (type: float), c18 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16, _col17, _col18, _col19, _col20
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 20, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 20
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15, 16, 17, 18, 19]
-                    dataColumns: insert_num:int, c1:smallint, c2:int, 
c3:bigint, c4:decimal(38,18), c5:float, c6:double, c7:int, c8:bigint, 
c9:decimal(38,18), c10:float, c11:double, c12:bigint, c13:decimal(38,18), 
c14:float, c15:double, c16:decimal(38,18), c17:float, c18:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20"]
+          TableScan [TS_0] (rows=6 width=143)
+            
default@part_change_lower_to_higher_numeric_group_tinyint_to_bigint,part_change_lower_to_higher_numeric_group_tinyint_to_bigint,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","b"]
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,b
 from part_change_lower_to_higher_numeric_group_tinyint_to_bigint
 PREHOOK: type: QUERY
@@ -1754,73 +1370,25 @@ POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PA
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).c3 EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
 POSTHOOK: Lineage: part_change_lower_to_higher_numeric_group_decimal_to_float PARTITION(part=1).insert_num EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 _col0  _col1   _col2   _col3   _col4
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from 
part_change_lower_to_higher_numeric_group_decimal_to_float
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,b from 
part_change_lower_to_higher_numeric_group_decimal_to_float
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: 
part_change_lower_to_higher_numeric_group_decimal_to_float
-                  Statistics: Num rows: 6 Data size: 428 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: float), c2 (type: double), c3 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 5, 1, 2, 3, 4]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 5
-                    includeColumns: [0, 1, 2, 3, 4]
-                    dataColumns: insert_num:int, c1:float, c2:double, 
c3:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+          TableScan [TS_0] (rows=6 width=71)
+            
default@part_change_lower_to_higher_numeric_group_decimal_to_float,part_change_lower_to_higher_numeric_group_decimal_to_float,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,b from 
part_change_lower_to_higher_numeric_group_decimal_to_float
 PREHOOK: type: QUERY

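The hunks above swap golden output produced by "explain vectorization detail" for the compact
output of plain "explain" (the "Plan optimized by CBO." tree). As a minimal sketch of the two
statement forms whose results are being compared -- the query text is taken verbatim from the
hunks above, and running it assumes the test table part_add_int_permute_select exists:

    -- old form: emits PLAN VECTORIZATION, STAGE DEPENDENCIES and the full STAGE PLANS tree
    explain vectorization detail
    select insert_num,part,a,b from part_add_int_permute_select;

    -- new form: emits the condensed CBO-formatted operator tree shown in the added lines
    explain
    select insert_num,part,a,b from part_add_int_permute_select;
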
http://git-wip-us.apache.org/repos/asf/hive/blob/aed21d0b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex.q.out
index aa09b53..e35222b 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_complex.q.out
@@ -153,55 +153,25 @@ POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).b SIMPL
 POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).insert_num SIMPLE [(complex_struct1_c_txt)complex_struct1_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_struct1 PARTITION(part=1).s1 SIMPLE [(complex_struct1_c_txt)complex_struct1_c_txt.FieldSchema(name:s1, type:struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>, comment:null), ]
 complex_struct1_c_txt.insert_num       complex_struct1_c_txt.s1        complex_struct1_c_txt.b
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,s1,b from part_change_various_various_struct1
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,s1,b from part_change_various_various_struct1
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_struct1
-                  Statistics: Num rows: 6 Data size: 931 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), s1 
(type: 
struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>),
 b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                notVectorizedReason: Select expression for SELECT operator: 
Data type 
struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>
 of Column[s1] not supported
-                vectorized: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=6 width=155)
+            
default@part_change_various_various_struct1,part_change_various_various_struct1,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","s1","b"]
 
 PREHOOK: query: select insert_num,part,s1,b from 
part_change_various_various_struct1
 PREHOOK: type: QUERY
@@ -447,55 +417,25 @@ POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).b SIMPLE [
 POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).insert_num SIMPLE [(complex_struct2_d_txt)complex_struct2_d_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_add_various_various_struct2 PARTITION(part=1).s2 SIMPLE [(complex_struct2_d_txt)complex_struct2_d_txt.FieldSchema(name:s2, type:struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>, comment:null), ]
 complex_struct2_d_txt.insert_num       complex_struct2_d_txt.b complex_struct2_d_txt.s2
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,b,s2 from part_add_various_various_struct2
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,b,s2 from part_add_various_various_struct2
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_various_various_struct2
-                  Statistics: Num rows: 8 Data size: 939 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), b 
(type: string), s2 (type: 
struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 8 Data size: 32 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 8 Data size: 32 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                notVectorizedReason: Select expression for SELECT operator: 
Data type 
struct<c1:string,c2:string,c3:string,c4:string,c5:string,c6:string,c7:string,c8:string,c9:string,c10:string,c11:string,c12:string,c13:string>
 of Column[s2] not supported
-                vectorized: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=8 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=8 width=117)
+            
default@part_add_various_various_struct2,part_add_various_various_struct2,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","b","s2"]
 
 PREHOOK: query: select insert_num,part,b,s2 from 
part_add_various_various_struct2
 PREHOOK: type: QUERY
@@ -665,55 +605,25 @@ POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).b SIMPL
 POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).insert_num SIMPLE [(complex_struct4_c_txt)complex_struct4_c_txt.FieldSchema(name:insert_num, type:int, comment:null), ]
 POSTHOOK: Lineage: part_add_to_various_various_struct4 PARTITION(part=1).s3 SIMPLE [(complex_struct4_c_txt)complex_struct4_c_txt.FieldSchema(name:s3, type:struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>, comment:null), ]
 complex_struct4_c_txt.insert_num       complex_struct4_c_txt.b complex_struct4_c_txt.s3
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,b,s3 from part_add_to_various_various_struct4
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,b,s3 from part_add_to_various_various_struct4
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_add_to_various_various_struct4
-                  Statistics: Num rows: 4 Data size: 353 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), b 
(type: string), s3 (type: 
struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 4 Data size: 16 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 4 Data size: 16 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                notVectorizedReason: Select expression for SELECT operator: 
Data type 
struct<c1:boolean,c2:tinyint,c3:smallint,c4:int,c5:bigint,c6:float,c7:double,c8:decimal(38,18),c9:char(25),c10:varchar(25),c11:timestamp,c12:date,c13:binary>
 of Column[s3] not supported
-                vectorized: false
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 llap
+      File Output Operator [FS_2]
+        Select Operator [SEL_1] (rows=4 width=4)
+          Output:["_col0","_col1","_col2","_col3"]
+          TableScan [TS_0] (rows=4 width=88)
+            
default@part_add_to_various_various_struct4,part_add_to_various_various_struct4,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","b","s3"]
 
 PREHOOK: query: select insert_num,part,b,s3 from 
part_add_to_various_various_struct4
 PREHOOK: type: QUERY

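The struct-typed hunks above also drop the recorded vectorization switches and the
notVectorizedReason for the struct columns. A small illustrative session sketch follows: the two
properties and the query are copied from the removed output, while the set statements themselves
are only an assumed way to reproduce such a session and are not part of this commit:

    -- assumed session setup; both properties appear in the removed Map Vectorization output
    set hive.vectorized.execution.enabled=true;
    set hive.vectorized.use.vector.serde.deserialize=true;
    -- the old golden output was generated from this form of the statement
    explain vectorization detail
    select insert_num,part,s1,b from part_change_various_various_struct1;
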
http://git-wip-us.apache.org/repos/asf/hive/blob/aed21d0b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive.q.out b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive.q.out
index e8b5c71..fb38687 100644
--- a/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive.q.out
+++ b/ql/src/test/results/clientpositive/llap/schema_evol_text_vec_part_all_primitive.q.out
@@ -282,73 +282,25 @@ POSTHOOK: Lineage: 
part_change_various_various_boolean_to_bigint PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint 
PARTITION(part=1).c9 SIMPLE 
[(schema_evolution_data)schema_evolution_data.FieldSchema(name:boolean1, 
type:boolean, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_boolean_to_bigint 
PARTITION(part=1).insert_num SIMPLE 
[(schema_evolution_data)schema_evolution_data.FieldSchema(name:insert_num, 
type:int, comment:null), ]
 insert_num     boolean1        boolean1        boolean1        boolean1        
boolean1        boolean1        boolean1        boolean1        boolean1        
tinyint1        tinyint1        tinyint1        tinyint1        tinyint1        
tinyint1        tinyint1        tinyint1        tinyint1        tinyint1        
tinyint1        smallint1       smallint1       smallint1       smallint1       
smallint1       smallint1       smallint1       smallint1       smallint1       
smallint1       smallint1       int1    int1    int1    int1    int1    int1    
int1    int1    int1    int1    int1    bigint1 bigint1 bigint1 bigint1 bigint1 
bigint1 bigint1 bigint1 bigint1 bigint1 bigint1 _c54
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b
 from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b
 from part_change_various_various_boolean_to_bigint
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_boolean_to_bigint
-                  Statistics: Num rows: 10 Data size: 4707 Basic stats: 
COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 
50, 51, 52, 53, 54, 55]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: boolean), c2 (type: boolean), c3 (type: boolean), c4 (type: boolean), c5 
(type: boolean), c6 (type: boolean), c7 (type: boolean), c8 (type: boolean), c9 
(type: boolean), c10 (type: tinyint), c11 (type: tinyint), c12 (type: tinyint), 
c13 (type: tinyint), c14 (type: tinyint), c15 (type: tinyint), c16 (type: 
tinyint), c17 (type: tinyint), c18 (type: tinyint), c19 (type: tinyint), c20 
(type: tinyint), c21 (type: smallint), c22 (type: smallint), c23 (type: 
smallint), c24 (type: smallint), c25 (type: smallint), c26 (type: smallint), 
c27 (type: smallint), c28 (type: smallint), c29 (type: smallint), c30 (type: 
smallint), c31 (type: smallint), c32 (type: int), c33 (type: int), c34 (type: 
int), c35 (type: int), c36 (type: int), c37 (type: int), c38 (type: int), c39 
(type: int), c40 (type: int), c41 (type: int), c42 (type: int), c43 (type: 
bigint), c44 (type: bigint), c45 (type: bigint), c46 (type: bigint), c47 (type: bigint), c48 (type: bigint), c49 (type: bigint), c50 (type: bigint), 
c51 (type: bigint), c52 (type: bigint), c53 (type: bigint), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, 
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, 
_col35, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44, 
_col45, _col46, _col47, _col48, _col49, _col50, _col51, _col52, _col53, _col54, 
_col55
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 55, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 
28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 
48, 49, 50, 51, 52, 53, 54]
-                    Statistics: Num rows: 10 Data size: 40 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 10 Data size: 40 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 55
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 
53, 54]
-                    dataColumns: insert_num:int, c1:boolean, c2:boolean, 
c3:boolean, c4:boolean, c5:boolean, c6:boolean, c7:boolean, c8:boolean, 
c9:boolean, c10:tinyint, c11:tinyint, c12:tinyint, c13:tinyint, c14:tinyint, 
c15:tinyint, c16:tinyint, c17:tinyint, c18:tinyint, c19:tinyint, c20:tinyint, 
c21:smallint, c22:smallint, c23:smallint, c24:smallint, c25:smallint, 
c26:smallint, c27:smallint, c28:smallint, c29:smallint, c30:smallint, 
c31:smallint, c32:int, c33:int, c34:int, c35:int, c36:int, c37:int, c38:int, 
c39:int, c40:int, c41:int, c42:int, c43:bigint, c44:bigint, c45:bigint, 
c46:bigint, c47:bigint, c48:bigint, c49:bigint, c50:bigint, c51:bigint, 
c52:bigint, c53:bigint, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=10 width=4)
+          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43","_col44","_col45","_col46","_col47","_col48","_col49","_col50","_col51","_col52","_col53","_col54","_col55"]
+          TableScan [TS_0] (rows=10 width=470)
+            
default@part_change_various_various_boolean_to_bigint,part_change_various_various_boolean_to_bigint,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","c34","c35","c36","c37","c38","c39","c40","c41","c42","c43","c44","c45","c46","c47","c48","c49","c50","c51","c52","c53","b"]
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,c34,c35,c36,c37,c38,c39,c40,c41,c42,c43,c44,c45,c46,c47,c48,c49,c50,c51,c52,c53,b
 from part_change_various_various_boolean_to_bigint
 PREHOOK: type: QUERY
@@ -545,73 +497,25 @@ POSTHOOK: Lineage: 
part_change_various_various_decimal_to_double PARTITION(part=
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double 
PARTITION(part=1).c9 SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:decimal1, 
type:decimal(38,18), comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_decimal_to_double 
PARTITION(part=1).insert_num SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, 
type:int, comment:null), ]
 insert_num     decimal1        decimal1        decimal1        decimal1        
decimal1        decimal1        decimal1        decimal1        decimal1        
decimal1        decimal1        float1  float1  float1  float1  float1  float1  
float1  float1  float1  float1  float1  double1 double1 double1 double1 double1 
double1 double1 double1 double1 double1 double1 _c34
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b
 from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b
 from part_change_various_various_decimal_to_double
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+Plan optimized by CBO.
 
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_decimal_to_double
-                  Statistics: Num rows: 6 Data size: 2551 Basic stats: 
COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 
30, 31, 32, 33, 34, 35]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: decimal(38,18)), c2 (type: decimal(38,18)), c3 (type: decimal(38,18)), 
c4 (type: decimal(38,18)), c5 (type: decimal(38,18)), c6 (type: 
decimal(38,18)), c7 (type: decimal(38,18)), c8 (type: decimal(38,18)), c9 
(type: decimal(38,18)), c10 (type: decimal(38,18)), c11 (type: decimal(38,18)), 
c12 (type: float), c13 (type: float), c14 (type: float), c15 (type: float), c16 
(type: float), c17 (type: float), c18 (type: float), c19 (type: float), c20 
(type: float), c21 (type: float), c22 (type: float), c23 (type: double), c24 
(type: double), c25 (type: double), c26 (type: double), c27 (type: double), c28 
(type: double), c29 (type: double), c30 (type: double), c31 (type: double), c32 
(type: double), c33 (type: double), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, 
_col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, 
_col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, 
_col35
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 35, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 
28, 29, 30, 31, 32, 33, 34]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 35
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 
33, 34]
-                    dataColumns: insert_num:int, c1:decimal(38,18), 
c2:decimal(38,18), c3:decimal(38,18), c4:decimal(38,18), c5:decimal(38,18), 
c6:decimal(38,18), c7:decimal(38,18), c8:decimal(38,18), c9:decimal(38,18), 
c10:decimal(38,18), c11:decimal(38,18), c12:float, c13:float, c14:float, 
c15:float, c16:float, c17:float, c18:float, c19:float, c20:float, c21:float, 
c22:float, c23:double, c24:double, c25:double, c26:double, c27:double, 
c28:double, c29:double, c30:double, c31:double, c32:double, c33:double, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35"]
+          TableScan [TS_0] (rows=6 width=425)
+            
default@part_change_various_various_decimal_to_double,part_change_various_various_decimal_to_double,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","c13","c14","c15","c16","c17","c18","c19","c20","c21","c22","c23","c24","c25","c26","c27","c28","c29","c30","c31","c32","c33","b"]
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19,c20,c21,c22,c23,c24,c25,c26,c27,c28,c29,c30,c31,c32,c33,b
 from part_change_various_various_decimal_to_double
 PREHOOK: type: QUERY
@@ -724,73 +628,25 @@ POSTHOOK: Lineage: part_change_various_various_timestamp 
PARTITION(part=1).c8 SI
 POSTHOOK: Lineage: part_change_various_various_timestamp PARTITION(part=1).c9 
SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:timestamp1, 
type:timestamp, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_timestamp 
PARTITION(part=1).insert_num SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, 
type:int, comment:null), ]
 insert_num     timestamp1      timestamp1      timestamp1      timestamp1      
timestamp1      timestamp1      timestamp1      timestamp1      timestamp1      
timestamp1      timestamp1      timestamp1      _c13
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from 
part_change_various_various_timestamp
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from 
part_change_various_various_timestamp
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+Plan optimized by CBO.
 
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_timestamp
-                  Statistics: Num rows: 6 Data size: 870 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: timestamp), c2 (type: timestamp), c3 (type: timestamp), c4 (type: 
timestamp), c5 (type: timestamp), c6 (type: timestamp), c7 (type: timestamp), 
c8 (type: timestamp), c9 (type: timestamp), c10 (type: timestamp), c11 (type: 
timestamp), c12 (type: timestamp), b (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 14, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 14
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13]
-                    dataColumns: insert_num:int, c1:timestamp, c2:timestamp, 
c3:timestamp, c4:timestamp, c5:timestamp, c6:timestamp, c7:timestamp, 
c8:timestamp, c9:timestamp, c10:timestamp, c11:timestamp, c12:timestamp, 
b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
+          TableScan [TS_0] (rows=6 width=145)
+            
default@part_change_various_various_timestamp,part_change_various_various_timestamp,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","c7","c8","c9","c10","c11","c12","b"]
 
 PREHOOK: query: select 
insert_num,part,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,b from 
part_change_various_various_timestamp
 PREHOOK: type: QUERY
@@ -887,73 +743,25 @@ POSTHOOK: Lineage: part_change_various_various_date 
PARTITION(part=1).c3 SIMPLE
 POSTHOOK: Lineage: part_change_various_various_date PARTITION(part=1).c4 
SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:date1, 
type:date, comment:null), ]
 POSTHOOK: Lineage: part_change_various_various_date 
PARTITION(part=1).insert_num SIMPLE 
[(schema_evolution_data_2)schema_evolution_data_2.FieldSchema(name:insert_num, 
type:int, comment:null), ]
 insert_num     date1   date1   date1   date1   _c5
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,b from part_change_various_various_date
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_various_various_date
-                  Statistics: Num rows: 6 Data size: 376 Basic stats: COMPLETE 
Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: date), c2 (type: date), c3 (type: date), c4 (type: date), b (type: 
string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 6, 1, 2, 3, 4, 5]
-                    Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 6 Data size: 24 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 6
-                    includeColumns: [0, 1, 2, 3, 4, 5]
-                    dataColumns: insert_num:int, c1:date, c2:date, c3:date, 
c4:date, b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=6 width=4)
+          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+          TableScan [TS_0] (rows=6 width=62)
+            
default@part_change_various_various_date,part_change_various_various_date,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,b from 
part_change_various_various_date
 PREHOOK: type: QUERY
@@ -1131,73 +939,25 @@ POSTHOOK: Lineage: 
part_change_same_type_different_params PARTITION(part=2).c5 S
 POSTHOOK: Lineage: part_change_same_type_different_params PARTITION(part=2).c6 
SIMPLE [(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:c6, 
type:decimal(25,15), comment:null), ]
 POSTHOOK: Lineage: part_change_same_type_different_params 
PARTITION(part=2).insert_num SIMPLE 
[(same_type1_c_txt)same_type1_c_txt.FieldSchema(name:insert_num, type:int, 
comment:null), ]
 same_type1_c_txt.insert_num    same_type1_c_txt.c1     same_type1_c_txt.c2     
same_type1_c_txt.c3     same_type1_c_txt.c4     same_type1_c_txt.c5     
same_type1_c_txt.c6     same_type1_c_txt.b
-PREHOOK: query: explain vectorization detail
+PREHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from 
part_change_same_type_different_params
 PREHOOK: type: QUERY
-POSTHOOK: query: explain vectorization detail
+POSTHOOK: query: explain
 select insert_num,part,c1,c2,c3,c4,c5,c6,b from 
part_change_same_type_different_params
 POSTHOOK: type: QUERY
 Explain
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: part_change_same_type_different_params
-                  Statistics: Num rows: 13 Data size: 1311 Basic stats: 
COMPLETE Column stats: PARTIAL
-                  TableScan Vectorization:
-                      native: true
-                      projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8]
-                  Select Operator
-                    expressions: insert_num (type: int), part (type: int), c1 
(type: char(8)), c2 (type: char(32)), c3 (type: varchar(15)), c4 (type: 
varchar(18)), c5 (type: decimal(10,2)), c6 (type: decimal(25,15)), b (type: 
string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8
-                    Select Vectorization:
-                        className: VectorSelectOperator
-                        native: true
-                        projectedOutputColumns: [0, 8, 1, 2, 3, 4, 5, 6, 7]
-                    Statistics: Num rows: 13 Data size: 52 Basic stats: 
COMPLETE Column stats: PARTIAL
-                    File Output Operator
-                      compressed: false
-                      File Sink Vectorization:
-                          className: VectorFileSinkOperator
-                          native: false
-                      Statistics: Num rows: 13 Data size: 52 Basic stats: 
COMPLETE Column stats: PARTIAL
-                      table:
-                          input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized, llap
-            Map Vectorization:
-                enabled: true
-                enabledConditionsMet: 
hive.vectorized.use.vector.serde.deserialize IS true
-                groupByVectorOutput: true
-                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-                allNative: false
-                usesVectorUDFAdaptor: false
-                vectorized: true
-                rowBatchContext:
-                    dataColumnCount: 8
-                    includeColumns: [0, 1, 2, 3, 4, 5, 6, 7]
-                    dataColumns: insert_num:int, c1:char(8), c2:char(32), 
c3:varchar(15), c4:varchar(18), c5:decimal(10,2), c6:decimal(25,15), b:string
-                    partitionColumnCount: 1
-                    partitionColumns: part:int
+Plan optimized by CBO.
 
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
+Stage-0
+  Fetch Operator
+    limit:-1
+    Stage-1
+      Map 1 vectorized, llap
+      File Output Operator [FS_4]
+        Select Operator [SEL_3] (rows=13 width=4)
+          
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
+          TableScan [TS_0] (rows=13 width=100)
+            
default@part_change_same_type_different_params,part_change_same_type_different_params,Tbl:COMPLETE,Col:PARTIAL,Output:["insert_num","c1","c2","c3","c4","c5","c6","b"]
 
 PREHOOK: query: select insert_num,part,c1,c2,c3,c4,c5,c6,b from 
part_change_same_type_different_params
 PREHOOK: type: QUERY
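For reference, a minimal HiveQL sketch (not part of the patch) of the two explain forms these golden files switch between, using the last query in this hunk: the removed text was recorded from the detailed vectorization form, the added text from the plain form ("Plan optimized by CBO.").

-- Old golden text: detailed vectorization plan.
EXPLAIN VECTORIZATION DETAIL
SELECT insert_num, part, c1, c2, c3, c4, c5, c6, b
FROM part_change_same_type_different_params;

-- New golden text: plain explain.
EXPLAIN
SELECT insert_num, part, c1, c2, c3, c4, c5, c6, b
FROM part_change_same_type_different_params;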
