Repository: hive
Updated Branches:
  refs/heads/master 6a282657c -> cc38bcc5a


http://git-wip-us.apache.org/repos/asf/hive/blob/cc38bcc5/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out
index 91b52e7..7b6fa66 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_order_null.q.out
@@ -113,16 +113,28 @@ STAGE PLANS:
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
         Reducer 2 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aza
+                reduceColumnSortOrder: +++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 3
+                    dataColumns: KEY.reducesinkkey0:int, 
KEY.reducesinkkey1:string, KEY.reducesinkkey2:bigint
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), 
KEY.reducesinkkey2 (type: bigint), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col2, _col3, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [0, 2, 1]
                 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE 
Column stats: NONE
                 PTF Operator
                   Function definitions:
@@ -143,16 +155,39 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumLong
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorStreamingLongSum]
+                      functionInputExpressions: [col 2:bigint]
+                      functionNames: [sum]
+                      keyInputColumns: [0, 2, 1]
+                      native: true
+                      nonKeyInputColumns: []
+                      orderExpressions: [col 1:string, col 2:bigint]
+                      outputColumns: [3, 0, 2, 1]
+                      outputTypes: [bigint, int, bigint, string]
+                      partitionExpressions: [col 0:int]
+                      streamingColumns: [3]
                   Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE 
Column stats: NONE
                   Select Operator
                     expressions: _col2 (type: int), _col7 (type: string), 
_col3 (type: bigint), sum_window_0 (type: bigint)
                     outputColumnNames: _col0, _col1, _col2, _col3
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 3]
                     Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 10
+                      Limit Vectorization:
+                          className: VectorLimitOperator
+                          native: true
                       Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
                         Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                         table:
                             input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -245,16 +280,28 @@ STAGE PLANS:
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
         Reducer 2 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aaa
+                reduceColumnSortOrder: ++-
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 3
+                    dataColumns: KEY.reducesinkkey0:double, 
KEY.reducesinkkey1:string, KEY.reducesinkkey2:float
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey2 (type: float), 
KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col4, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [2, 0, 1]
                 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE 
Column stats: NONE
                 PTF Operator
                   Function definitions:
@@ -275,16 +322,39 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 2:float]
+                      functionNames: [sum]
+                      keyInputColumns: [2, 0, 1]
+                      native: true
+                      nonKeyInputColumns: []
+                      orderExpressions: [col 1:string, col 2:float]
+                      outputColumns: [3, 2, 0, 1]
+                      outputTypes: [double, float, double, string]
+                      partitionExpressions: [col 0:double]
+                      streamingColumns: [3]
                   Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE 
Column stats: NONE
                   Select Operator
                     expressions: _col5 (type: double), _col7 (type: string), 
_col4 (type: float), sum_window_0 (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 3]
                     Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 10
+                      Limit Vectorization:
+                          className: VectorLimitOperator
+                          native: true
                       Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
                         Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                         table:
                             input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
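
The two reducers above previously fell back to row mode with the removed notVectorizedReason and now report "Execution mode: vectorized, llap", with a VectorPTFOperator whose streaming evaluators (VectorPTFEvaluatorStreamingLongSum / VectorPTFEvaluatorStreamingDoubleSum) compute the running sum for the "ROWS PRECEDING(MAX)~CURRENT" frame. A minimal HiveQL sketch of that window shape follows; the table and column names are hypothetical and are not taken from vector_windowing_order_null.q:

    -- Hypothetical table t(i int, s string, b bigint); illustrative only.
    -- The frame is the "ROWS PRECEDING(MAX)~CURRENT" frame shown in the plans,
    -- i.e. a running sum from the start of each partition up to the current row.
    SELECT i, b, s,
           sum(b) OVER (PARTITION BY i
                        ORDER BY s, b
                        ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS running_sum
    FROM t
    LIMIT 10;
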

http://git-wip-us.apache.org/repos/asf/hive/blob/cc38bcc5/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out b/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
index 93b8655..584453c 100644
--- a/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_windowing_windowspec.q.out
@@ -105,16 +105,28 @@ STAGE PLANS:
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
         Reducer 2 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aaa
+                reduceColumnSortOrder: +++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 3
+                    dataColumns: KEY.reducesinkkey0:int, 
KEY.reducesinkkey1:string, KEY.reducesinkkey2:bigint
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), 
KEY.reducesinkkey2 (type: bigint), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col2, _col3, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [0, 2, 1]
                 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE 
Column stats: NONE
                 PTF Operator
                   Function definitions:
@@ -135,16 +147,39 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumLong
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorStreamingLongSum]
+                      functionInputExpressions: [col 2:bigint]
+                      functionNames: [sum]
+                      keyInputColumns: [0, 2, 1]
+                      native: true
+                      nonKeyInputColumns: []
+                      orderExpressions: [col 1:string, col 2:bigint]
+                      outputColumns: [3, 0, 2, 1]
+                      outputTypes: [bigint, int, bigint, string]
+                      partitionExpressions: [col 0:int]
+                      streamingColumns: [3]
                   Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE 
Column stats: NONE
                   Select Operator
                     expressions: _col7 (type: string), sum_window_0 (type: 
bigint)
                     outputColumnNames: _col0, _col1
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [1, 3]
                     Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 100
+                      Limit Vectorization:
+                          className: VectorLimitOperator
+                          native: true
                       Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
                         Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                         table:
                             input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -327,16 +362,28 @@ STAGE PLANS:
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
         Reducer 2 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aaa
+                reduceColumnSortOrder: +++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 3
+                    dataColumns: KEY.reducesinkkey0:double, 
KEY.reducesinkkey1:string, KEY.reducesinkkey2:float
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey2 (type: float), 
KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col4, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [2, 0, 1]
                 Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE 
Column stats: NONE
                 PTF Operator
                   Function definitions:
@@ -357,16 +404,39 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 2:float]
+                      functionNames: [sum]
+                      keyInputColumns: [2, 0, 1]
+                      native: true
+                      nonKeyInputColumns: []
+                      orderExpressions: [col 1:string, col 2:float]
+                      outputColumns: [3, 2, 0, 1]
+                      outputTypes: [double, float, double, string]
+                      partitionExpressions: [col 0:double]
+                      streamingColumns: [3]
                   Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE 
Column stats: NONE
                   Select Operator
                     expressions: _col7 (type: string), sum_window_0 (type: 
double)
                     outputColumnNames: _col0, _col1
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [1, 3]
                     Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                     Limit
                       Number of rows: 100
+                      Limit Vectorization:
+                          className: VectorLimitOperator
+                          native: true
                       Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
                         Statistics: Num rows: 1 Data size: 196 Basic stats: 
COMPLETE Column stats: NONE
                         table:
                             input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/cc38bcc5/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
index cc6631e..b6b6cc2 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_ptf.q.out
@@ -212,16 +212,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:string, 
KEY.reducesinkkey1:string, VALUE._col3:int, VALUE._col5:double
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, double, double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
                 outputColumnNames: _col1, _col2, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2, 3]
                 Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                 PTF Operator
                   Function definitions:
@@ -256,13 +268,34 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, 
VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 1:string, col 1:string, 
col 3:double]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2, 3]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [4, 5, 6, 1, 0, 2, 3]
+                      outputTypes: [int, int, double, string, string, int, 
double]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [4, 5, 6]
                   Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), 
_col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), 
round(sum_window_2, 2) (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 4, 5, 7]
+                        selectExpressions: 
RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 2) -> 7:double
                     Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                       table:
                           input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -846,16 +879,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:string, 
KEY.reducesinkkey1:string, VALUE._col3:int, VALUE._col5:double
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, double, double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
                 outputColumnNames: _col1, _col2, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2, 3]
                 Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                 PTF Operator
                   Function definitions:
@@ -890,13 +935,34 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, 
VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 1:string, col 1:string, 
col 3:double]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2, 3]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [4, 5, 6, 1, 0, 2, 3]
+                      outputTypes: [int, int, double, string, string, int, 
double]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [4, 5, 6]
                   Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), 
_col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), 
round(sum_window_2, 2) (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 4, 5, 7]
+                        selectExpressions: 
RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 2) -> 7:double
                     Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                       table:
                           input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2141,16 +2207,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:string, 
KEY.reducesinkkey1:string, VALUE._col3:int, VALUE._col5:double
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, double, double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
                 outputColumnNames: _col1, _col2, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2, 3]
                 Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                 PTF Operator
                   Function definitions:
@@ -2185,13 +2263,34 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, 
VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 1:string, col 1:string, 
col 3:double]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2, 3]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [4, 5, 6, 1, 0, 2, 3]
+                      outputTypes: [int, int, double, string, string, int, 
double]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [4, 5, 6]
                   Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), 
_col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), 
round(sum_window_2, 2) (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 4, 5, 7]
+                        selectExpressions: 
RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 2) -> 7:double
                     Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                       table:
                           input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2356,16 +2455,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:string, 
KEY.reducesinkkey1:string, VALUE._col3:int, VALUE._col5:double
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, double, double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
                 outputColumnNames: _col1, _col2, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2, 3]
                 Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                 PTF Operator
                   Function definitions:
@@ -2400,13 +2511,34 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, 
VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 1:string, col 1:string, 
col 3:double]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2, 3]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [4, 5, 6, 1, 0, 2, 3]
+                      outputTypes: [int, int, double, string, string, int, 
double]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [4, 5, 6]
                   Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), 
_col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), 
round(sum_window_2, 2) (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 4, 5, 7]
+                        selectExpressions: 
RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 2) -> 7:double
                     Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                       table:
                           input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2637,16 +2769,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 4 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:string, 
KEY.reducesinkkey1:string, VALUE._col3:int, VALUE._col5:double
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, double, double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
                 outputColumnNames: _col1, _col2, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2, 3]
                 Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                 PTF Operator
                   Function definitions:
@@ -2681,13 +2825,34 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, 
VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 1:string, col 1:string, 
col 3:double]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2, 3]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [4, 5, 6, 1, 0, 2, 3]
+                      outputTypes: [int, int, double, string, string, int, 
double]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [4, 5, 6]
                   Statistics: Num rows: 26 Data size: 12974 Basic stats: 
COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), 
_col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), 
round(sum_window_2, 2) (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 4, 5, 7]
+                        selectExpressions: 
RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 2) -> 7:double
                     Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                       table:
                           input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -3152,7 +3317,7 @@ STAGE PLANS:
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
+                notVectorizedReason: PTF operator: lag not in supported 
functions [avg, count, dense_rank, first_value, last_value, max, min, rank, 
row_number, sum]
                 vectorized: false
             Reduce Operator Tree:
               Select Operator
@@ -3889,7 +4054,7 @@ STAGE PLANS:
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
+                notVectorizedReason: Aggregation Function expression for 
GROUPBY operator: UDF compute_stats not supported
                 vectorized: false
             Reduce Operator Tree:
               Select Operator
@@ -4784,16 +4949,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 12766 Basic stats: 
COMPLETE Column stats: COMPLETE
                     value expressions: _col5 (type: int)
         Reducer 5 
-            Execution mode: llap
+            Execution mode: vectorized, llap
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine tez IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 3
+                    dataColumns: KEY.reducesinkkey0:string, 
KEY.reducesinkkey1:string, VALUE._col3:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, bigint]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
                 outputColumnNames: _col1, _col2, _col5
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2]
                 Statistics: Num rows: 26 Data size: 12766 Basic stats: 
COMPLETE Column stats: COMPLETE
                 PTF Operator
                   Function definitions:
@@ -4828,13 +5005,33 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumLong
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, 
VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingLongSum]
+                      functionInputExpressions: [col 1:string, col 1:string, 
col 2:int]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [3, 4, 5, 1, 0, 2]
+                      outputTypes: [int, int, bigint, string, string, int]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [3, 4, 5]
                   Statistics: Num rows: 26 Data size: 12766 Basic stats: 
COMPLETE Column stats: COMPLETE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), 
rank_window_0 (type: int), dense_rank_window_1 (type: int), _col5 (type: int), 
sum_window_2 (type: bigint)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 3, 4, 2, 5]
                     Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 6214 Basic stats: 
COMPLETE Column stats: COMPLETE
                       table:
                           input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
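
In the vectorized_ptf.q.out plans above, a single VectorPTFOperator now evaluates rank, dense_rank, and a streaming sum together (evaluatorClasses [VectorPTFEvaluatorRank, VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum/LongSum]). A hedged sketch of that query shape, assuming the columns of Hive's part test table (p_mfgr, p_name, p_size, p_retailprice); the actual vectorized_ptf.q text is not part of this diff:

    -- Sketch only; assumes part(p_mfgr string, p_name string, p_size int, p_retailprice double).
    -- Matches the plan shape: partition on p_mfgr, order on p_name, running sum rounded to 2 places.
    SELECT p_mfgr, p_name, p_size,
           rank()       OVER (PARTITION BY p_mfgr ORDER BY p_name) AS r,
           dense_rank() OVER (PARTITION BY p_mfgr ORDER BY p_name) AS dr,
           round(sum(p_retailprice) OVER (PARTITION BY p_mfgr ORDER BY p_name
                                          ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW), 2) AS running_price
    FROM part;
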

http://git-wip-us.apache.org/repos/asf/hive/blob/cc38bcc5/ql/src/test/results/clientpositive/perf/spark/query51.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/spark/query51.q.out b/ql/src/test/results/clientpositive/perf/spark/query51.q.out
index c0bb72b..21afbe2 100644
--- a/ql/src/test/results/clientpositive/perf/spark/query51.q.out
+++ b/ql/src/test/results/clientpositive/perf/spark/query51.q.out
@@ -284,6 +284,7 @@ STAGE PLANS:
                   Statistics: Num rows: 348477374 Data size: 30742775095 Basic 
stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: decimal(27,2)), _col3 (type: int), _col4 (type: string), _col5 
(type: decimal(27,2))
         Reducer 4 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: int), VALUE._col1 (type: 
string), VALUE._col2 (type: decimal(27,2)), VALUE._col3 (type: int), 
VALUE._col4 (type: string), VALUE._col5 (type: decimal(27,2))

http://git-wip-us.apache.org/repos/asf/hive/blob/cc38bcc5/ql/src/test/results/clientpositive/perf/tez/query51.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query51.q.out b/ql/src/test/results/clientpositive/perf/tez/query51.q.out
index be123ae..ba3f994 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query51.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query51.q.out
@@ -106,20 +106,20 @@ Stage-0
     limit:100
     Stage-1
       Reducer 6 vectorized
-      File Output Operator [FS_113]
-        Limit [LIM_112] (rows=100 width=88)
+      File Output Operator [FS_117]
+        Limit [LIM_116] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_111] (rows=116159124 width=88)
+          Select Operator [SEL_115] (rows=116159124 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-          <-Reducer 5 [SIMPLE_EDGE]
-            SHUFFLE [RS_50]
-              Select Operator [SEL_46] (rows=116159124 width=88)
+          <-Reducer 5 [SIMPLE_EDGE] vectorized
+            SHUFFLE [RS_114]
+              Select Operator [SEL_113] (rows=116159124 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                Filter Operator [FIL_58] (rows=116159124 width=88)
+                Filter Operator [FIL_112] (rows=116159124 width=88)
                   predicate:(max_window_0 > max_window_1)
-                  PTF Operator [PTF_45] (rows=348477374 width=88)
+                  PTF Operator [PTF_111] (rows=348477374 width=88)
                     Function 
definitions:[{},{"name:":"windowingtablefunction","order by:":"CASE WHEN (_col4 
is not null) THEN (_col4) ELSE (_col1) END ASC NULLS FIRST","partition 
by:":"CASE WHEN (_col3 is not null) THEN (_col3) ELSE (_col0) END"}]
-                    Select Operator [SEL_44] (rows=348477374 width=88)
+                    Select Operator [SEL_110] (rows=348477374 width=88)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                     <-Reducer 4 [SIMPLE_EDGE]
                       SHUFFLE [RS_43]
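
In the query51 plans above (TPC-DS query 51), the reducer evaluating the windowed maxima and the (max_window_0 > max_window_1) filter now runs vectorized, and the downstream operator ids shift accordingly (FS_113 -> FS_117, etc.). A simplified, illustrative sketch of the cumulative-max window that the query compares; names are simplified and not copied from the benchmark text:

    -- Simplified; TPC-DS query 51 compares cumulative web and store sales per item.
    SELECT item_sk, d_date,
           max(cume_sales) OVER (PARTITION BY item_sk ORDER BY d_date
                                 ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS running_max
    FROM daily_cumulative_sales;
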

http://git-wip-us.apache.org/repos/asf/hive/blob/cc38bcc5/ql/src/test/results/clientpositive/spark/ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ptf.q.out b/ql/src/test/results/clientpositive/spark/ptf.q.out
index 2017923..62d0942 100644
--- a/ql/src/test/results/clientpositive/spark/ptf.q.out
+++ b/ql/src/test/results/clientpositive/spark/ptf.q.out
@@ -69,6 +69,7 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
@@ -547,6 +548,7 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
@@ -1572,6 +1574,7 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
@@ -1744,6 +1747,7 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
@@ -1976,6 +1980,7 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 4 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
@@ -2916,6 +2921,7 @@ STAGE PLANS:
                     value expressions: p_size (type: int), p_retailprice 
(type: double)
             Execution mode: vectorized
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
@@ -3672,6 +3678,7 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int)
         Reducer 5 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/cc38bcc5/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
index f82e248..a133aad 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
@@ -209,15 +209,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: 
COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:string, 
KEY.reducesinkkey1:string, VALUE._col3:int, VALUE._col5:double
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, double, double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), 
KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: 
double)
                 outputColumnNames: _col1, _col2, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2, 3]
                 Statistics: Num rows: 26 Data size: 16042 Basic stats: 
COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
@@ -252,13 +265,34 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, 
VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 1:string, col 1:string, 
col 3:double]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2, 3]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [4, 5, 6, 1, 0, 2, 3]
+                      outputTypes: [int, int, double, string, string, int, 
double]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [4, 5, 6]
                   Statistics: Num rows: 26 Data size: 16042 Basic stats: 
COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), 
_col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), 
round(sum_window_2, 2) (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 4, 5, 7]
+                        selectExpressions: 
RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 2) -> 7:double
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: 
COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: 
COMPLETE Column stats: NONE
                       table:
                           input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -835,15 +869,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: 
COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled 
IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is 
not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:string, 
KEY.reducesinkkey1:string, VALUE._col3:int, VALUE._col5:double
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, double, double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
                 outputColumnNames: _col1, _col2, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2, 3]
                 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
@@ -878,13 +925,34 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 1:string, col 1:string, col 3:double]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2, 3]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [4, 5, 6, 1, 0, 2, 3]
+                      outputTypes: [int, int, double, string, string, int, double]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [4, 5, 6]
                   Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), round(sum_window_2, 2) (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 4, 5, 7]
+                        selectExpressions: RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 2) -> 7:double
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2112,15 +2180,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:string, KEY.reducesinkkey1:string, VALUE._col3:int, VALUE._col5:double
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, double, double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
                 outputColumnNames: _col1, _col2, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2, 3]
                 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
@@ -2155,13 +2236,34 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 1:string, col 1:string, col 3:double]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2, 3]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [4, 5, 6, 1, 0, 2, 3]
+                      outputTypes: [int, int, double, string, string, int, double]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [4, 5, 6]
                   Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), round(sum_window_2, 2) (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 4, 5, 7]
+                        selectExpressions: RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 2) -> 7:double
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2323,15 +2425,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:string, KEY.reducesinkkey1:string, VALUE._col3:int, VALUE._col5:double
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, double, double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
                 outputColumnNames: _col1, _col2, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2, 3]
                 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
@@ -2366,13 +2481,34 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 1:string, col 1:string, col 3:double]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2, 3]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [4, 5, 6, 1, 0, 2, 3]
+                      outputTypes: [int, int, double, string, string, int, double]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [4, 5, 6]
                   Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), round(sum_window_2, 2) (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 4, 5, 7]
+                        selectExpressions: RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 2) -> 7:double
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -2599,15 +2735,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int), _col7 (type: double)
         Reducer 4 
+            Execution mode: vectorized
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:string, KEY.reducesinkkey1:string, VALUE._col3:int, VALUE._col5:double
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, double, double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
                 outputColumnNames: _col1, _col2, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2, 3]
                 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
@@ -2642,13 +2791,34 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 1:string, col 1:string, col 3:double]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2, 3]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [4, 5, 6, 1, 0, 2, 3]
+                      outputTypes: [int, int, double, string, string, int, double]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [4, 5, 6]
                   Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), round(sum_window_2, 2) (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 4, 5, 7]
+                        selectExpressions: RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 2) -> 7:double
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -3108,7 +3278,7 @@ STAGE PLANS:
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is not supported for ROWS window type
+                notVectorizedReason: PTF operator: lag not in supported functions [avg, count, dense_rank, first_value, last_value, max, min, rank, row_number, sum]
                 vectorized: false
             Reduce Operator Tree:
               Select Operator
@@ -3792,15 +3962,28 @@ STAGE PLANS:
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
         Reducer 3 
+            Execution mode: vectorized
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 4
+                    dataColumns: KEY.reducesinkkey0:string, KEY.reducesinkkey1:string, VALUE._col3:int, VALUE._col5:double
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, double, double]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col5 (type: double)
                 outputColumnNames: _col1, _col2, _col5, _col7
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2, 3]
                 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
@@ -3835,13 +4018,34 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumDouble
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingDoubleSum]
+                      functionInputExpressions: [col 1:string, col 1:string, col 3:double]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2, 3]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [4, 5, 6, 1, 0, 2, 3]
+                      outputTypes: [int, int, double, string, string, int, double]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [4, 5, 6]
                   Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int), rank_window_0 (type: int), dense_rank_window_1 (type: int), round(sum_window_2, 2) (type: double)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 2, 4, 5, 7]
+                        selectExpressions: RoundWithNumDigitsDoubleToDouble(col 6, decimalPlaces 2) -> 7:double
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4659,15 +4863,28 @@ STAGE PLANS:
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col5 (type: int)
         Reducer 5 
+            Execution mode: vectorized
             Reduce Vectorization:
                 enabled: true
                 enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine spark IN [tez, spark] IS true
-                notVectorizedReason: PTF operator: sum UNBOUNDED end frame is not supported for ROWS window type
-                vectorized: false
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 3
+                    dataColumns: KEY.reducesinkkey0:string, KEY.reducesinkkey1:string, VALUE._col3:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [bigint, bigint, bigint]
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int)
                 outputColumnNames: _col1, _col2, _col5
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [1, 0, 2]
                 Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
@@ -4702,13 +4919,33 @@ STAGE PLANS:
                               name: sum
                               window function: GenericUDAFSumLong
                               window frame: ROWS PRECEDING(MAX)~CURRENT
+                  PTF Vectorization:
+                      className: VectorPTFOperator
+                      evaluatorClasses: [VectorPTFEvaluatorRank, VectorPTFEvaluatorDenseRank, VectorPTFEvaluatorStreamingLongSum]
+                      functionInputExpressions: [col 1:string, col 1:string, col 2:int]
+                      functionNames: [rank, dense_rank, sum]
+                      keyInputColumns: [1, 0]
+                      native: true
+                      nonKeyInputColumns: [2]
+                      orderExpressions: [col 1:string]
+                      outputColumns: [3, 4, 5, 1, 0, 2]
+                      outputTypes: [int, int, bigint, string, string, int]
+                      partitionExpressions: [col 0:string]
+                      streamingColumns: [3, 4, 5]
                   Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col2 (type: string), _col1 (type: string), rank_window_0 (type: int), dense_rank_window_1 (type: int), _col5 (type: int), sum_window_2 (type: bigint)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [0, 1, 3, 4, 2, 5]
                     Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
