http://git-wip-us.apache.org/repos/asf/hive/blob/8f7c5788/ql/src/test/results/clientpositive/llap/union_remove_26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/union_remove_26.q.out b/ql/src/test/results/clientpositive/llap/union_remove_26.q.out
index 9ddc2c8..a8cb207 100644
--- a/ql/src/test/results/clientpositive/llap/union_remove_26.q.out
+++ b/ql/src/test/results/clientpositive/llap/union_remove_26.q.out
@@ -181,10 +181,10 @@ STAGE PLANS:
                       aggregations: count(), min(val), max(val)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint), _col1 (type: int), _col2 (type: int)
             Execution mode: llap
             LLAP IO: no inputs
@@ -230,10 +230,10 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0), min(VALUE._col1), max(VALUE._col2)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/8f7c5788/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
index e63cbf8..0f9abac 100644
--- a/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_adaptor_usage_mode.q.out
@@ -125,17 +125,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: c2 regexp 'val' (type: boolean), c4 regexp 'val' (type: boolean), (c2 regexp 'val' = c4 regexp 'val') (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -203,17 +203,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: regexp_extract(c2, 'val_([0-9]+)', 1) (type: string), regexp_extract(c4, 'val_([0-9]+)', 1) (type: string), (regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -281,17 +281,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: regexp_replace(c2, 'val', 'replaced') (type: string), regexp_replace(c4, 'val', 'replaced') (type: string), (regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')) (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -359,17 +359,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: c2 regexp 'val' (type: boolean), c4 regexp 'val' (type: boolean), (c2 regexp 'val' = c4 regexp 'val') (type: boolean)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -437,7 +437,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                   TableScan Vectorization:
                       native: true
                       projectedOutputColumns: [0, 1, 2, 3]
@@ -449,19 +449,19 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumns: [4, 5, 8]
                         selectExpressions: VectorUDFAdaptor(regexp_extract(c2, 'val_([0-9]+)', 1)) -> 4:string, VectorUDFAdaptor(regexp_extract(c4, 'val_([0-9]+)', 1)) -> 5:string, StringGroupColEqualStringGroupColumn(col 6, col 7)(children: VectorUDFAdaptor(regexp_extract(c2, 'val_([0-9]+)', 1)) -> 6:string, VectorUDFAdaptor(regexp_extract(c4, 'val_([0-9]+)', 1)) -> 7:string) -> 8:boolean
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
                       Limit Vectorization:
                           className: VectorLimitOperator
                           native: true
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
                         File Sink Vectorization:
                             className: VectorFileSinkOperator
                             native: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -531,7 +531,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: varchar_udf_1
-                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                   TableScan Vectorization:
                       native: true
                       projectedOutputColumns: [0, 1, 2, 3]
@@ -543,19 +543,19 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumns: [4, 5, 8]
                         selectExpressions: VectorUDFAdaptor(regexp_replace(c2, 'val', 'replaced')) -> 4:string, VectorUDFAdaptor(regexp_replace(c4, 'val', 'replaced')) -> 5:string, StringGroupColEqualStringGroupColumn(col 6, col 7)(children: VectorUDFAdaptor(regexp_replace(c2, 'val', 'replaced')) -> 6:string, VectorUDFAdaptor(regexp_replace(c4, 'val', 'replaced')) -> 7:string) -> 8:boolean
-                    Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 1
                       Limit Vectorization:
                           className: VectorLimitOperator
                           native: true
-                      Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
                         File Sink Vectorization:
                             className: VectorFileSinkOperator
                             native: false
-                        Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 288 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -615,14 +615,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_udf
-                  Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: power(key, 2) (type: double)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -719,17 +719,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_udf
-                  Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (key = 10) (type: boolean)
-                    Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
-                      Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -787,14 +787,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_udf
-                  Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: power(key, 2) (type: double)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 38 Data size: 4256 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -891,17 +891,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: decimal_udf
-                  Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 38 Data size: 4408 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (key = 10) (type: boolean)
-                    Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
-                      Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -964,7 +964,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: count_case_groupby
-                  Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
                   TableScan Vectorization:
                       native: true
                       projectedOutputColumns: [0, 1]
@@ -976,7 +976,7 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumns: [0, 5]
                         selectExpressions: IfExprLongScalarLongColumn(col 1, val 1, col 4)(children: IfExprColumnNull(col 2, col 3, null)(children: NotCol(col 1) -> 2:boolean, ConstantVectorExpression(val 0) -> 3:long) -> 4:int) -> 5:long
-                    Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(_col1)
                       Group By Vectorization:
@@ -991,7 +991,7 @@ STAGE PLANS:
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
@@ -1000,7 +1000,7 @@ STAGE PLANS:
                             className: VectorReduceSinkStringOperator
                             native: true
                             nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1036,13 +1036,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   File Sink Vectorization:
                       className: VectorFileSinkOperator
                       native: false
-                  Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1093,7 +1093,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: count_case_groupby
-                  Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
                   TableScan Vectorization:
                       native: true
                       projectedOutputColumns: [0, 1]
@@ -1105,7 +1105,7 @@ STAGE PLANS:
                         native: true
                         projectedOutputColumns: [0, 5]
                         selectExpressions: IfExprLongScalarLongColumn(col 1, val 1, col 4)(children: IfExprColumnNull(col 2, col 3, null)(children: NotCol(col 1) -> 2:boolean, ConstantVectorExpression(val 0) -> 3:long) -> 4:int) -> 5:long
-                    Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(_col1)
                       Group By Vectorization:
@@ -1120,7 +1120,7 @@ STAGE PLANS:
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
@@ -1129,7 +1129,7 @@ STAGE PLANS:
                             className: VectorReduceSinkStringOperator
                             native: true
                             nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                        Statistics: Num rows: 5 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 5 Data size: 940 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1165,13 +1165,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   File Sink Vectorization:
                       className: VectorFileSinkOperator
                       native: false
-                  Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/8f7c5788/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
index cf3dc23..e05ff91 100644
--- a/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_aggregate_9.q.out
@@ -127,7 +127,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vectortab2korc
-                  Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 212912 Basic stats: COMPLETE Column stats: NONE
                   TableScan Vectorization:
                       native: true
                       projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
@@ -138,7 +138,7 @@ STAGE PLANS:
                         className: VectorSelectOperator
                         native: true
                         projectedOutputColumns: [6]
-                    Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2000 Data size: 212912 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(dc), max(dc), sum(dc), avg(dc)
                       Group By Vectorization:
@@ -151,7 +151,7 @@ STAGE PLANS:
                           projectedOutputColumns: [0, 1, 2, 3]
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 1 Data size: 624 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 736 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
                         Reduce Sink Vectorization:
@@ -160,7 +160,7 @@ STAGE PLANS:
                             native: true
                             nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             valueColumns: [0, 1, 2, 3]
-                        Statistics: Num rows: 1 Data size: 624 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 736 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18)), _col3 (type: struct<count:bigint,sum:decimal(38,18),input:decimal(38,18)>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -205,13 +205,13 @@ STAGE PLANS:
                     projectedOutputColumns: [0, 1, 2, 3]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 1 Data size: 624 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 736 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   File Sink Vectorization:
                       className: VectorFileSinkOperator
                       native: false
-                  Statistics: Num rows: 1 Data size: 624 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 736 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -258,7 +258,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vectortab2korc
-                  Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 15208 Basic stats: COMPLETE Column stats: NONE
                   TableScan Vectorization:
                       native: true
                       projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
@@ -269,7 +269,7 @@ STAGE PLANS:
                         className: VectorSelectOperator
                         native: true
                         projectedOutputColumns: [5]
-                    Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2000 Data size: 15208 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(d), max(d), sum(d), avg(d)
                       Group By Vectorization:
@@ -282,7 +282,7 @@ STAGE PLANS:
                           projectedOutputColumns: [0, 1, 2, 3]
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
                         Reduce Sink Vectorization:
@@ -291,7 +291,7 @@ STAGE PLANS:
                             native: true
                             nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             valueColumns: [0, 1, 2, 3]
-                        Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: struct<count:bigint,sum:double,input:double>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -336,13 +336,13 @@ STAGE PLANS:
                     projectedOutputColumns: [0, 1, 2, 3]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   File Sink Vectorization:
                       className: VectorFileSinkOperator
                       native: false
-                  Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -389,7 +389,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: vectortab2korc
-                  Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2000 Data size: 76040 Basic stats: COMPLETE Column stats: NONE
                   TableScan Vectorization:
                       native: true
                       projectedOutputColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
@@ -400,7 +400,7 @@ STAGE PLANS:
                         className: VectorSelectOperator
                         native: true
                         projectedOutputColumns: [10]
-                    Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2000 Data size: 76040 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(ts), max(ts), sum(ts), avg(ts)
                       Group By Vectorization:
@@ -413,7 +413,7 @@ STAGE PLANS:
                           projectedOutputColumns: [0, 1, 2, 3]
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 240 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
                         Reduce Sink Vectorization:
@@ -422,7 +422,7 @@ STAGE PLANS:
                             native: true
                             nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                             valueColumns: [0, 1, 2, 3]
-                        Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 240 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: double), _col3 (type: struct<count:bigint,sum:double,input:timestamp>)
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -467,13 +467,13 @@ STAGE PLANS:
                     projectedOutputColumns: [0, 1, 2, 3]
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 240 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   File Sink Vectorization:
                       className: VectorFileSinkOperator
                       native: false
-                  Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 240 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/8f7c5788/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out b/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
index 0cf62d3..8113e74 100644
--- a/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_aggregate_without_gby.q.out
@@ -46,17 +46,17 @@ Stage-0
     Stage-1
       Reducer 2 vectorized, llap
       File Output Operator [FS_14]
-        Group By Operator [GBY_13] (rows=1 width=188)
+        Group By Operator [GBY_13] (rows=1 width=380)
           Output:["_col0","_col1"],aggregations:["max(VALUE._col0)","max(VALUE._col1)"]
         <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized, llap
           PARTITION_ONLY_SHUFFLE [RS_12]
-            Group By Operator [GBY_11] (rows=1 width=188)
+            Group By Operator [GBY_11] (rows=1 width=380)
               Output:["_col0","_col1"],aggregations:["max(dt)","max(greg_dt)"]
-              Select Operator [SEL_10] (rows=3 width=102)
+              Select Operator [SEL_10] (rows=7 width=192)
                 Output:["dt","greg_dt"]
-                Filter Operator [FIL_9] (rows=3 width=102)
+                Filter Operator [FIL_9] (rows=7 width=192)
                   predicate:(id = 5)
-                  TableScan [TS_0] (rows=7 width=102)
+                  TableScan [TS_0] (rows=7 width=192)
                     default@testvec,testvec,Tbl:COMPLETE,Col:NONE,Output:["id","dt","greg_dt"]
 
 PREHOOK: query: select max(dt), max(greg_dt) from testvec where id=5
