http://git-wip-us.apache.org/repos/asf/hive/blob/812d57f8/ql/src/test/results/clientpositive/vector_case_when_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_case_when_2.q.out b/ql/src/test/results/clientpositive/vector_case_when_2.q.out
index 159c983..b8a5214
--- a/ql/src/test/results/clientpositive/vector_case_when_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_case_when_2.q.out
@@ -129,15 +129,15 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: timestamps
-            Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), CASE WHEN ((ctimestamp2 <= TIMESTAMP'1800-12-31 00:00:00.0')) THEN ('1800s or Earlier') WHEN ((ctimestamp2 < TIMESTAMP'1900-01-01 00:00:00.0')) THEN ('1900s') WHEN (ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') THEN ('Late 2000s') WHEN ((ctimestamp2 <= TIMESTAMP'2015-12-31 23:59:59.999999999')) THEN ('Early 2010s') ELSE ('Unknown') END (type: string), CASE WHEN ((ctimestamp2 <= TIMESTAMP'2000-12-31 23:59:59.999999999')) THEN ('Old') WHEN ((ctimestamp2 < TIMESTAMP'2006-01-01 00:00:00.0')) THEN ('Early 2000s') WHEN (ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') THEN ('Late 2000s') WHEN ((ctimestamp2 <= TIMESTAMP'2015-12-31 23:59:59.999999999')) THEN ('Early 2010s') ELSE (null) END (type: string), CASE WHEN ((ctimestamp2 <= TIMESTAMP'2000-12-31 23:59:59.999999999')) THEN ('Old') WHEN ((ctimestamp2 < TIMESTAMP'2006-01-01 00:00:00.0')) THEN ('Early 2000s') WHEN (ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') THEN ('Late 2000s') WHEN ((ctimestamp2 <= TIMESTAMP'2015-12-31 23:59:59.999999999')) THEN (null) ELSE (null) END (type: string), if((ctimestamp1 < TIMESTAMP'1974-10-04 17:21:03.989'), year(ctimestamp1), year(ctimestamp2)) (type: int), CASE WHEN ((stimestamp1 like '%19%')) THEN (stimestamp1) ELSE (TIMESTAMP'2018-03-08 23:04:59.0') END (type: string), if((ctimestamp1 = TIMESTAMP'2021-09-24 03:18:32.413655165'), null, minute(ctimestamp1)) (type: int), if(((ctimestamp2 >= TIMESTAMP'5344-10-04 18:40:08.165') and (ctimestamp2 < TIMESTAMP'6631-11-13 16:31:29.702202248')), minute(ctimestamp1), null) (type: int), if(((UDFToDouble(ctimestamp1) % 500.0D) > 100.0D), date_add(cdate, 1), date_add(cdate, 365)) (type: date), stimestamp1 (type: string)
              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-              Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: _col0 (type: timestamp), _col10 (type: string), _col1 (type: timestamp)
                sort order: +++
-                Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
                value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: int), _col8 (type: int), _col9 (type: date)
      Map Vectorization:
          enabled: true
@@ -153,10 +153,10 @@ STAGE PLANS:
        Select Operator
          expressions: KEY.reducesinkkey0 (type: timestamp), KEY.reducesinkkey2 (type: timestamp), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: date)
          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-          Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
          File Output Operator
            compressed: false
-            Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
            table:
                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -365,7 +365,7 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: timestamps
-            Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
            TableScan Vectorization:
                native: true
                vectorizationSchemaColumns: [0:cdate:date, 1:ctimestamp1:timestamp, 2:stimestamp1:string, 3:ctimestamp2:timestamp, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
@@ -377,7 +377,7 @@ STAGE PLANS:
                  native: true
                  projectedOutputColumnNums: [1, 3, 10, 12, 13, 14, 11, 7, 16, 23, 2]
                  selectExpressions: IfExprStringScalarStringGroupColumn(col 5:boolean, val 1800s or Earliercol 9:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 1800-12-31 00:00:00.0) -> 5:boolean, IfExprStringScalarStringGroupColumn(col 6:boolean, val 1900scol 10:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 1900-01-01 00:00:00.0) -> 6:boolean, IfExprStringScalarStringGroupColumn(col 7:boolean, val Late 2000scol 9:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 7:boolean, IfExprStringScalarStringScalar(col 8:boolean, val Early 2010s, val Unknown)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2015-12-31 23:59:59.999999999) -> 8:boolean) -> 9:string) -> 10:string) -> 9:string) -> 10:string, IfExprStringScalarStringGroupColumn(col 5:boolean, val Oldcol 11:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2000-12-31 23:59:59.999999999) -> 5:boolean, IfExprStringScalarStringGroupColumn(col 6:boolean, val Early 2000scol 12:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 2006-01-01 00:00:00.0) -> 6:boolean, IfExprStringScalarStringGroupColumn(col 7:boolean, val Late 2000scol 11:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 7:boolean, IfExprColumnNull(col 8:boolean, col 9:string, null)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2015-12-31 23:59:59.999999999) -> 8:boolean, ConstantVectorExpression(val Early 2010s) -> 9:string) -> 11:string) -> 12:string) -> 11:string) -> 12:string, IfExprStringScalarStringGroupColumn(col 5:boolean, val Oldcol 11:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2000-12-31 23:59:59.999999999) -> 5:boolean, IfExprStringScalarStringGroupColumn(col 6:boolean, val Early 2000scol 13:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 2006-01-01 00:00:00.0) -> 6:boolean, IfExprStringScalarStringGroupColumn(col 7:boolean, val Late 2000scol 11:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 7:boolean, IfExprNullNull(null, null) -> 11:string) -> 13:string) -> 11:string) -> 13:string, IfExprLongColumnLongColumn(col 5:boolean, col 6:int, col 7:int)(children: TimestampColLessTimestampScalar(col 1:timestamp, val 1974-10-04 17:21:03.989) -> 5:boolean, VectorUDFYearTimestamp(col 1:timestamp, field YEAR) -> 6:int, VectorUDFYearTimestamp(col 3:timestamp, field YEAR) -> 7:int) -> 14:int, VectorUDFAdaptor(CASE WHEN ((stimestamp1 like '%19%')) THEN (stimestamp1) ELSE (TIMESTAMP'2018-03-08 23:04:59.0') END)(children: SelectStringColLikeStringScalar(col 2:string) -> 5:boolean) -> 11:string, IfExprNullColumn(col 5:boolean, null, col 6)(children: TimestampColEqualTimestampScalar(col 1:timestamp, val 2021-09-24 03:18:32.413655165) -> 5:boolean, VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 6:int) -> 7:int, IfExprColumnNull(col 17:boolean, col 15:int, null)(children: ColAndCol(col 15:boolean, col 16:boolean)(children: TimestampColGreaterEqualTimestampScalar(col 3:timestamp, val 5344-10-04 18:40:08.165) -> 15:boolean, TimestampColLessTimestampScalar(col 3:timestamp, val 6631-11-13 16:31:29.702202248) -> 16:boolean) -> 17:boolean, VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 15:int) -> 16:int, IfExprLongColumnLongColumn(col 20:boolean, col 21:date, col 22:date)(children: DoubleColGreaterDoubleScalar(col 19:double, val 100.0)(children: DoubleColModuloDoubleScalar(col 18:double, val 500.0)(children: CastTimestampToDouble(col 1:timestamp) -> 18:double) -> 19:double) -> 20:boolean, VectorUDFDateAddColScalar(col 0:date, val 1) -> 21:date, VectorUDFDateAddColScalar(col 0:date, val 365) -> 22:date) -> 23:date
-              Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: _col0 (type: timestamp), _col10 (type: string), _col1 (type: timestamp)
                sort order: +++
@@ -386,7 +386,7 @@ STAGE PLANS:
                    native: false
                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
                value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: int), _col8 (type: int), _col9 (type: date)
      Execution mode: vectorized
      Map Vectorization:
@@ -412,10 +412,10 @@ STAGE PLANS:
        Select Operator
          expressions: KEY.reducesinkkey0 (type: timestamp), KEY.reducesinkkey2 (type: timestamp), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: date)
          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-          Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
          File Output Operator
            compressed: false
-            Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
            table:
                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -624,7 +624,7 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: timestamps
-            Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
            TableScan Vectorization:
                native: true
                vectorizationSchemaColumns: [0:cdate:date, 1:ctimestamp1:timestamp, 2:stimestamp1:string, 3:ctimestamp2:timestamp, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
@@ -636,7 +636,7 @@ STAGE PLANS:
                  native: true
                  projectedOutputColumnNums: [1, 3, 15, 26, 36, 40, 42, 44, 46, 53, 2]
                  selectExpressions: IfExprColumnCondExpr(col 5:boolean, col 6:stringcol 14:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 1800-12-31 00:00:00.0) -> 5:boolean, ConstantVectorExpression(val 1800s or Earlier) -> 6:string, IfExprColumnCondExpr(col 7:boolean, col 8:stringcol 13:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 1900-01-01 00:00:00.0) -> 7:boolean, ConstantVectorExpression(val 1900s) -> 8:string, IfExprColumnCondExpr(col 9:boolean, col 10:stringcol 12:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 9:boolean, ConstantVectorExpression(val Late 2000s) -> 10:string, IfExprStringScalarStringScalar(col 11:boolean, val Early 2010s, val Unknown)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2015-12-31 23:59:59.999999999) -> 11:boolean) -> 12:string) -> 13:string) -> 14:string) -> 15:string, IfExprColumnCondExpr(col 11:boolean, col 16:stringcol 25:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2000-12-31 23:59:59.999999999) -> 11:boolean, ConstantVectorExpression(val Old) -> 16:string, IfExprColumnCondExpr(col 17:boolean, col 18:stringcol 24:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 2006-01-01 00:00:00.0) -> 17:boolean, ConstantVectorExpression(val Early 2000s) -> 18:string, IfExprColumnCondExpr(col 19:boolean, col 20:stringcol 23:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 19:boolean, ConstantVectorExpression(val Late 2000s) -> 20:string, IfExprColumnNull(col 21:boolean, col 22:string, null)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2015-12-31 23:59:59.999999999) -> 21:boolean, ConstantVectorExpression(val Early 2010s) -> 22:string) -> 23:string) -> 24:string) -> 25:string) -> 26:string, IfExprColumnCondExpr(col 27:boolean, col 28:stringcol 35:string)(children: TimestampColLessEqualTimestampScalar(col 3:timestamp, val 2000-12-31 23:59:59.999999999) -> 27:boolean, ConstantVectorExpression(val Old) -> 28:string, IfExprColumnCondExpr(col 29:boolean, col 30:stringcol 34:string)(children: TimestampColLessTimestampScalar(col 3:timestamp, val 2006-01-01 00:00:00.0) -> 29:boolean, ConstantVectorExpression(val Early 2000s) -> 30:string, IfExprColumnCondExpr(col 31:boolean, col 32:stringcol 33:string)(children: VectorUDFAdaptor(ctimestamp2 BETWEEN TIMESTAMP'2006-01-01 00:00:00.0' AND TIMESTAMP'2010-12-31 23:59:59.999999999') -> 31:boolean, ConstantVectorExpression(val Late 2000s) -> 32:string, IfExprNullNull(null, null) -> 33:string) -> 34:string) -> 35:string) -> 36:string, IfExprCondExprCondExpr(col 37:boolean, col 38:intcol 39:int)(children: TimestampColLessTimestampScalar(col 1:timestamp, val 1974-10-04 17:21:03.989) -> 37:boolean, VectorUDFYearTimestamp(col 1:timestamp, field YEAR) -> 38:int, VectorUDFYearTimestamp(col 3:timestamp, field YEAR) -> 39:int) -> 40:int, VectorUDFAdaptor(CASE WHEN ((stimestamp1 like '%19%')) THEN (stimestamp1) ELSE (TIMESTAMP'2018-03-08 23:04:59.0') END)(children: SelectStringColLikeStringScalar(col 2:string) -> 41:boolean) -> 42:string, IfExprNullCondExpr(col 41:boolean, null, col 43:int)(children: TimestampColEqualTimestampScalar(col 1:timestamp, val 2021-09-24 03:18:32.413655165) -> 41:boolean, VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 43:int) -> 44:int, IfExprCondExprNull(col 47:boolean, col 45:int, null)(children: ColAndCol(col 45:boolean, col 46:boolean)(children: TimestampColGreaterEqualTimestampScalar(col 3:timestamp, val 5344-10-04 18:40:08.165) -> 45:boolean, TimestampColLessTimestampScalar(col 3:timestamp, val 6631-11-13 16:31:29.702202248) -> 46:boolean) -> 47:boolean, VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 45:int) -> 46:int, IfExprCondExprCondExpr(col 50:boolean, col 51:datecol 52:date)(children: DoubleColGreaterDoubleScalar(col 49:double, val 100.0)(children: DoubleColModuloDoubleScalar(col 48:double, val 500.0)(children: CastTimestampToDouble(col 1:timestamp) -> 48:double) -> 49:double) -> 50:boolean, VectorUDFDateAddColScalar(col 0:date, val 1) -> 51:date, VectorUDFDateAddColScalar(col 0:date, val 365) -> 52:date) -> 53:date
-              Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: _col0 (type: timestamp), _col10 (type: string), _col1 (type: timestamp)
                sort order: +++
@@ -645,7 +645,7 @@ STAGE PLANS:
                    native: false
                    nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
                value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: int), _col8 (type: int), _col9 (type: date)
      Execution mode: vectorized
      Map Vectorization:
@@ -671,10 +671,10 @@ STAGE PLANS:
        Select Operator
          expressions: KEY.reducesinkkey0 (type: timestamp), KEY.reducesinkkey2 (type: timestamp), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: date)
          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-          Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
          File Output Operator
            compressed: false
-            Statistics: Num rows: 51 Data size: 12384 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 51 Data size: 12300 Basic stats: COMPLETE Column stats: NONE
            table:
                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
http://git-wip-us.apache.org/repos/asf/hive/blob/812d57f8/ql/src/test/results/clientpositive/vector_char_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_2.q.out b/ql/src/test/results/clientpositive/vector_char_2.q.out
index ae9910d..97038ee
--- a/ql/src/test/results/clientpositive/vector_char_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_2.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: char_2_n0
-            Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
            TableScan Vectorization:
                native: true
            Select Operator
@@ -95,7 +95,7 @@ STAGE PLANS:
                  native: true
                  projectedOutputColumnNums: [1, 3]
                  selectExpressions: CastStringToLong(col 0:char(10)) -> 3:int
-              Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: sum(_col1), count()
                Group By Vectorization:
@@ -109,7 +109,7 @@ STAGE PLANS:
                keys: _col0 (type: char(20))
                mode: hash
                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: char(20))
                  sort order: +
@@ -119,7 +119,7 @@ STAGE PLANS:
                      native: false
                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                  Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                  TopN Hash Memory Usage: 0.1
                  value expressions: _col1 (type: bigint), _col2 (type: bigint)
      Execution mode: vectorized
@@ -142,7 +142,7 @@ STAGE PLANS:
          keys: KEY._col0 (type: char(20))
          mode: mergepartial
          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
          File Output Operator
            compressed: false
            table:
@@ -164,7 +164,7 @@ STAGE PLANS:
                      native: false
                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                  Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
                  TopN Hash Memory Usage: 0.1
                  value expressions: _col1 (type: bigint), _col2 (type: bigint)
      Execution mode: vectorized
@@ -185,7 +185,7 @@ STAGE PLANS:
        Select Operator
          expressions: KEY.reducesinkkey0 (type: char(20)), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint)
          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
          Limit
            Number of rows: 5
            Statistics: Num rows: 5 Data size: 985 Basic stats: COMPLETE Column stats: NONE
@@ -272,7 +272,7 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: char_2_n0
-            Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
            TableScan Vectorization:
                native: true
            Select Operator
@@ -283,7 +283,7 @@ STAGE PLANS:
                  native: true
                  projectedOutputColumnNums: [1, 3]
                  selectExpressions: CastStringToLong(col 0:char(10)) -> 3:int
-              Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: sum(_col1), count()
                Group By Vectorization:
@@ -297,7 +297,7 @@ STAGE PLANS:
                keys: _col0 (type: char(20))
                mode: hash
                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: char(20))
                  sort order: -
@@ -307,7 +307,7 @@ STAGE PLANS:
                      native: false
                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                  Statistics: Num rows: 501 Data size: 99168 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 501 Data size: 99000 Basic stats: COMPLETE Column stats: NONE
                  TopN Hash Memory Usage: 0.1
                  value expressions: _col1 (type: bigint), _col2 (type: bigint)
      Execution mode: vectorized
@@ -330,7 +330,7 @@ STAGE PLANS:
          keys: KEY._col0 (type: char(20))
          mode: mergepartial
          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
          File Output Operator
            compressed: false
            table:
@@ -352,7 +352,7 @@ STAGE PLANS:
                      native: false
                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                  Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
                  TopN Hash Memory Usage: 0.1
                  value expressions: _col1 (type: bigint), _col2 (type: bigint)
      Execution mode: vectorized
@@ -373,7 +373,7 @@ STAGE PLANS:
        Select Operator
          expressions: KEY.reducesinkkey0 (type: char(20)), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint)
          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 250 Data size: 49485 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 250 Data size: 49401 Basic stats: COMPLETE Column stats: NONE
          Limit
            Number of rows: 5
            Statistics: Num rows: 5 Data size: 985 Basic stats: COMPLETE Column stats: NONE
http://git-wip-us.apache.org/repos/asf/hive/blob/812d57f8/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
index 48d38c3..c42d295
--- a/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce_2.q.out
@@ -52,7 +52,7 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: str_str_orc
-            Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
            TableScan Vectorization:
                native: true
            Select Operator
@@ -63,7 +63,7 @@ STAGE PLANS:
                  native: true
                  projectedOutputColumnNums: [1, 5]
                  selectExpressions: CastStringToLong(col 4:string)(children: VectorCoalesce(columns [0, 3])(children: col 0:string, ConstantVectorExpression(val 0) -> 3:string) -> 4:string) -> 5:int
-              Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: sum(_col1)
                Group By Vectorization:
@@ -77,7 +77,7 @@ STAGE PLANS:
                keys: _col0 (type: string)
                mode: hash
                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string)
                  sort order: +
@@ -87,7 +87,7 @@ STAGE PLANS:
                      native: false
                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                  Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
                  value expressions: _col1 (type: bigint)
      Execution mode: vectorized
      Map Vectorization:
@@ -109,14 +109,14 @@ STAGE PLANS:
          keys: KEY._col0 (type: string)
          mode: mergepartial
          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0D), 2) (type: double)
            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
-              Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -167,7 +167,7 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: str_str_orc
-            Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
            TableScan Vectorization:
                native: true
            Select Operator
@@ -178,13 +178,13 @@ STAGE PLANS:
                  native: true
                  projectedOutputColumnNums: [4]
                  selectExpressions: VectorCoalesce(columns [0, 3])(children: col 0:string, ConstantVectorExpression(val 0) -> 3:string) -> 4:string
-              Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
                File Sink Vectorization:
                    className: VectorFileSinkOperator
                    native: false
-                Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -247,7 +247,7 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: str_str_orc
-            Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
            TableScan Vectorization:
                native: true
            Select Operator
@@ -258,7 +258,7 @@ STAGE PLANS:
                  native: true
                  projectedOutputColumnNums: [1, 5]
                  selectExpressions: CastStringToLong(col 4:string)(children: VectorCoalesce(columns [0, 3])(children: col 0:string, ConstantVectorExpression(val 0) -> 3:string) -> 4:string) -> 5:int
-              Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: sum(_col1)
                Group By Vectorization:
@@ -272,7 +272,7 @@ STAGE PLANS:
                keys: _col0 (type: string)
                mode: hash
                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string)
                  sort order: +
@@ -282,7 +282,7 @@ STAGE PLANS:
                      native: false
                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                  Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
                  value expressions: _col1 (type: bigint)
      Execution mode: vectorized
      Map Vectorization:
@@ -304,14 +304,14 @@ STAGE PLANS:
          keys: KEY._col0 (type: string)
          mode: mergepartial
          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: string), round((UDFToDouble(_col1) / 60.0D), 2) (type: double)
            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
-              Statistics: Num rows: 2 Data size: 271 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -362,7 +362,7 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: str_str_orc
-            Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
            TableScan Vectorization:
                native: true
            Select Operator
@@ -373,13 +373,13 @@ STAGE PLANS:
                  native: true
                  projectedOutputColumnNums: [4]
                  selectExpressions: VectorCoalesce(columns [0, 3])(children: col 0:string, ConstantVectorExpression(val 0) -> 3:string) -> 4:string
-              Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
compressed: false File Sink Vectorization: className: VectorFileSinkOperator native: false - Statistics: Num rows: 5 Data size: 678 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 510 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat http://git-wip-us.apache.org/repos/asf/hive/blob/812d57f8/ql/src/test/results/clientpositive/vector_data_types.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/vector_data_types.q.out b/ql/src/test/results/clientpositive/vector_data_types.q.out index c2a2fce..b72340d 100644 --- a/ql/src/test/results/clientpositive/vector_data_types.q.out +++ b/ql/src/test/results/clientpositive/vector_data_types.q.out @@ -128,22 +128,22 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over1korc_n1 - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float), d (type: double), bo (type: boolean), s (type: string), ts (type: timestamp), dec (type: decimal(4,2)), bin (type: binary) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int) sort order: +++ - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary) Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: boolean), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: decimal(4,2)), VALUE._col7 (type: binary) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE @@ -218,7 +218,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over1korc_n1 - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -228,7 +228,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: 
COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int) sort order: +++ @@ -237,7 +237,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.1 value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: boolean), _col7 (type: string), _col8 (type: timestamp), _col9 (type: decimal(4,2)), _col10 (type: binary) Execution mode: vectorized @@ -258,7 +258,7 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: boolean), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: decimal(4,2)), VALUE._col7 (type: binary) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 Statistics: Num rows: 20 Data size: 5920 Basic stats: COMPLETE Column stats: NONE @@ -326,7 +326,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over1korc_n1 - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -337,7 +337,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [12] selectExpressions: VectorUDFAdaptor(hash(t,si,i,b,f,d,bo,s,ts,dec,bin)) -> 12:int - Statistics: Num rows: 1050 Data size: 311254 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1050 Data size: 311170 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col0) Group By Vectorization: http://git-wip-us.apache.org/repos/asf/hive/blob/812d57f8/ql/src/test/results/clientpositive/vector_interval_1.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/vector_interval_1.q.out b/ql/src/test/results/clientpositive/vector_interval_1.q.out index 8c0086e..70b7c66 100644 --- a/ql/src/test/results/clientpositive/vector_interval_1.q.out +++ b/ql/src/test/results/clientpositive/vector_interval_1.q.out @@ -80,7 +80,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -91,7 +91,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [2, 5, 6] selectExpressions: CastStringToIntervalYearMonth(col 2:string) -> 5:interval_year_month, CastStringToIntervalDayTime(col 3:string) -> 6:interval_day_time - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: 
COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + @@ -100,7 +100,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: interval_year_month), _col2 (type: interval_day_time) Execution mode: vectorized Map Vectorization: @@ -120,10 +120,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), INTERVAL'1-2' (type: interval_year_month), VALUE._col0 (type: interval_year_month), INTERVAL'1 02:03:04.000000000' (type: interval_day_time), VALUE._col1 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -191,7 +191,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -202,7 +202,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 7, 6, 9, 8] selectExpressions: IntervalYearMonthColAddIntervalYearMonthColumn(col 5:interval_year_month, col 6:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 5:interval_year_month, CastStringToIntervalYearMonth(col 2:string) -> 6:interval_year_month) -> 7:interval_year_month, IntervalYearMonthScalarAddIntervalYearMonthColumn(val 14, col 5:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 5:interval_year_month) -> 6:interval_year_month, IntervalYearMonthColSubtractIntervalYearMonthColumn(col 5:interval_year_month, col 8:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 5:interval_year_month, CastStringToIntervalYearMonth(col 2:string) -> 8:interval_year_month) -> 9:interval_year_month, IntervalYearMonthScalarSubtractIntervalYearMonthColumn(val 14, col 5:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 5:interval_year_month) -> 8:interval_year_month - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: date) sort order: + @@ -211,7 +211,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column 
stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: interval_year_month), _col2 (type: interval_year_month), _col3 (type: interval_year_month), _col4 (type: interval_year_month) Execution mode: vectorized Map Vectorization: @@ -231,10 +231,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: date), INTERVAL'2-4' (type: interval_year_month), VALUE._col0 (type: interval_year_month), VALUE._col1 (type: interval_year_month), INTERVAL'0-0' (type: interval_year_month), VALUE._col2 (type: interval_year_month), VALUE._col3 (type: interval_year_month) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -310,7 +310,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -321,7 +321,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 7, 6, 9, 8] selectExpressions: IntervalDayTimeColAddIntervalDayTimeColumn(col 5:interval_day_time, col 6:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 5:interval_day_time, CastStringToIntervalDayTime(col 3:string) -> 6:interval_day_time) -> 7:interval_day_time, IntervalDayTimeScalarAddIntervalDayTimeColumn(val 1 02:03:04.000000000, col 5:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 5:interval_day_time) -> 6:interval_day_time, IntervalDayTimeColSubtractIntervalDayTimeColumn(col 5:interval_day_time, col 8:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 5:interval_day_time, CastStringToIntervalDayTime(col 3:string) -> 8:interval_day_time) -> 9:interval_day_time, IntervalDayTimeScalarSubtractIntervalDayTimeColumn(val 1 02:03:04.000000000, col 5:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 5:interval_day_time) -> 8:interval_day_time - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: date) sort order: + @@ -330,7 +330,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col4 (type: interval_day_time) Execution mode: vectorized Map Vectorization: @@ -350,10 +350,10 @@ STAGE PLANS: Select Operator expressions: 
KEY.reducesinkkey0 (type: date), INTERVAL'2 04:06:08.000000000' (type: interval_day_time), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), INTERVAL'0 00:00:00.000000000' (type: interval_day_time), VALUE._col2 (type: interval_day_time), VALUE._col3 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -441,7 +441,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -452,7 +452,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 5, 7, 6, 9, 8, 11, 12, 14, 15, 16, 17, 18] selectExpressions: DateColAddIntervalYearMonthScalar(col 1:date, val 1-2) -> 5:date, DateColAddIntervalYearMonthColumn(col 1:date, col 6:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 6:interval_year_month) -> 7:date, IntervalYearMonthScalarAddDateColumn(val 1-2, col 1:interval_year_month) -> 6:date, IntervalYearMonthColAddDateColumn(col 8:interval_year_month, col 1:date)(children: CastStringToIntervalYearMonth(col 2:string) -> 8:interval_year_month) -> 9:date, DateColSubtractIntervalYearMonthScalar(col 1:date, val 1-2) -> 8:date, DateColSubtractIntervalYearMonthColumn(col 1:date, col 10:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 10:interval_year_month) -> 11:date, DateColAddIntervalDayTimeScalar(col 1:date, val 1 02:03:04.000000000) -> 12:timestamp, DateColAddIntervalDayTimeColumn(col 1:date, col 13:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 14 :timestamp, IntervalDayTimeScalarAddDateColumn(val 1 02:03:04.000000000, col 1:date) -> 15:timestamp, IntervalDayTimeColAddDateColumn(col 13:interval_day_time, col 1:date)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 16:timestamp, DateColSubtractIntervalDayTimeScalar(col 1:date, val 1 02:03:04.000000000) -> 17:timestamp, DateColSubtractIntervalDayTimeColumn(col 1:date, col 13:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 18:timestamp - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: date) sort order: + @@ -461,7 +461,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: 
_col1 (type: date), _col2 (type: date), _col3 (type: date), _col4 (type: date), _col5 (type: date), _col6 (type: date), _col7 (type: timestamp), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp) Execution mode: vectorized Map Vectorization: @@ -481,10 +481,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: date), VALUE._col1 (type: date), VALUE._col2 (type: date), VALUE._col3 (type: date), VALUE._col4 (type: date), VALUE._col5 (type: date), VALUE._col6 (type: timestamp), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: timestamp), VALUE._col10 (type: timestamp), VALUE._col11 (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -584,7 +584,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -595,7 +595,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [0, 5, 7, 8, 9, 10, 11, 12, 14, 15, 16, 17, 18] selectExpressions: TimestampColAddIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 5:timestamp, TimestampColAddIntervalYearMonthColumn(col 0:timestamp, col 6:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 6:interval_year_month) -> 7:timestamp, IntervalYearMonthScalarAddTimestampColumn(val 1-2, col 0:interval_year_month) -> 8:timestamp, IntervalYearMonthColAddTimestampColumn(col 6:interval_year_month, col 0:timestamp)(children: CastStringToIntervalYearMonth(col 2:string) -> 6:interval_year_month) -> 9:timestamp, TimestampColSubtractIntervalYearMonthScalar(col 0:timestamp, val 1-2) -> 10:timestamp, TimestampColSubtractIntervalYearMonthColumn(col 0:timestamp, col 6:interval_year_month)(children: CastStringToIntervalYearMonth(col 2:string) -> 6:interval_year_month) -> 11:timestamp, TimestampColAddIntervalDayTimeScalar(col 0:timestamp, val 1 02:03:04.000000000) -> 12:timestamp, TimestampColAddIntervalDayTimeColumn(col 0:timestamp, col 13:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 14:timestamp, IntervalDayTimeScalarAddTimestampColumn(val 1 02:03:04.000000000, col 0:timestamp) -> 15:timestamp, IntervalDayTimeColAddTimestampColumn(col 13:interval_day_time, col 0:timestamp)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 16:timestamp, TimestampColSubtractIntervalDayTimeScalar(col 0:timestamp, val 1 02:03:04.000000000) -> 17:timestamp, TimestampColSubtractIntervalDayTimeColumn(col 0:timestamp, col 13:interval_day_time)(children: CastStringToIntervalDayTime(col 3:string) -> 13:interval_day_time) -> 18:timestamp - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num 
rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: timestamp) sort order: + @@ -604,7 +604,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: timestamp), _col2 (type: timestamp), _col3 (type: timestamp), _col4 (type: timestamp), _col5 (type: timestamp), _col6 (type: timestamp), _col7 (type: timestamp), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp) Execution mode: vectorized Map Vectorization: @@ -624,10 +624,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: timestamp), VALUE._col0 (type: timestamp), VALUE._col1 (type: timestamp), VALUE._col2 (type: timestamp), VALUE._col3 (type: timestamp), VALUE._col4 (type: timestamp), VALUE._col5 (type: timestamp), VALUE._col6 (type: timestamp), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: timestamp), VALUE._col10 (type: timestamp), VALUE._col11 (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -709,7 +709,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -720,7 +720,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [0, 5, 6, 7] selectExpressions: TimestampColSubtractTimestampColumn(col 0:timestamp, col 0:timestamp) -> 5:interval_day_time, TimestampScalarSubtractTimestampColumn(val 2001-01-01 01:02:03.0, col 0:timestamp) -> 6:interval_day_time, TimestampColSubtractTimestampScalar(col 0:timestamp, val 2001-01-01 01:02:03.0) -> 7:interval_day_time - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: timestamp) sort order: + @@ -729,7 +729,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: 
_col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time) Execution mode: vectorized Map Vectorization: @@ -749,10 +749,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: timestamp), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -816,7 +816,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -827,7 +827,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 5, 6, 7] selectExpressions: DateColSubtractDateColumn(col 1:date, col 1:date) -> 5:interval_day_time, DateScalarSubtractDateColumn(val 2001-01-01 00:00:00.0, col 1:date) -> 6:interval_day_time, DateColSubtractDateScalar(col 1:date, val 2001-01-01 00:00:00.0) -> 7:interval_day_time - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: date) sort order: + @@ -836,7 +836,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time) Execution mode: vectorized Map Vectorization: @@ -856,10 +856,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -929,7 +929,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: vector_interval_1 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -940,7 
+940,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 5, 6, 7, 8, 9, 10] selectExpressions: TimestampColSubtractDateColumn(col 0:timestamp, col 1:date) -> 5:interval_day_time, TimestampScalarSubtractDateColumn(val 2001-01-01 01:02:03.0, col 1:date) -> 6:interval_day_time, TimestampColSubtractDateScalar(col 0:timestamp, val 2001-01-01 00:00:00.0) -> 7:interval_day_time, DateColSubtractTimestampColumn(col 1:date, col 0:timestamp) -> 8:interval_day_time, DateColSubtractTimestampScalar(col 1:date, val 2001-01-01 01:02:03.0) -> 9:interval_day_time, DateScalarSubtractTimestampColumn(val 2001-01-01 00:00:00.0, col 0:timestamp) -> 10:interval_day_time - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: date) sort order: + @@ -949,7 +949,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: interval_day_time), _col2 (type: interval_day_time), _col3 (type: interval_day_time), _col4 (type: interval_day_time), _col5 (type: interval_day_time), _col6 (type: interval_day_time) Execution mode: vectorized Map Vectorization: @@ -969,10 +969,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: interval_day_time), VALUE._col1 (type: interval_day_time), VALUE._col2 (type: interval_day_time), VALUE._col3 (type: interval_day_time), VALUE._col4 (type: interval_day_time), VALUE._col5 (type: interval_day_time) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 274 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat http://git-wip-us.apache.org/repos/asf/hive/blob/812d57f8/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out index 01e915b..9f90e82 100644 --- a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out +++ b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out @@ -253,7 +253,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -264,7 +264,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 3, 14, 15, 16, 17, 18] selectExpressions: 
VectorUDFUnixTimeStampTimestamp(col 1:timestamp) -> 5:bigint, VectorUDFYearTimestamp(col 1:timestamp, field YEAR) -> 6:int, VectorUDFMonthTimestamp(col 1:timestamp, field MONTH) -> 7:int, VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 8:int, VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 9:int, VectorUDFWeekOfYearTimestamp(col 1:timestamp, field WEEK_OF_YEAR) -> 10:int, VectorUDFHourTimestamp(col 1:timestamp, field HOUR_OF_DAY) -> 11:int, VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 12:int, VectorUDFSecondTimestamp(col 1:timestamp, field SECOND) -> 13:int, IfExprTimestampColumnScalar(col 0:boolean, col 1:timestamp, val 1319-02-02 16:31:57.778) -> 14:timestamp, IfExprTimestampScalarColumn(col 0:boolean, val 2000-12-18 08:42:30.0005, col 1:timestamp) -> 15:timestamp, IfExprTimestampColumnColumn(col 0:boolean, col 1:timestampcol 3:timestamp) -> 16:timestamp, IfExprColumnNull(col 0:boolean, col 1:timestamp, null)(children: col 0:boolean, col 1:timestamp) -> 17:timestamp, IfExprNullColumn(col 0:boolean, null, col 3)(children: col 0:boolean, col 3:timestamp) -> 18:timestamp - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + @@ -273,7 +273,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int), _col9 (type: boolean), _col10 (type: timestamp), _col11 (type: timestamp), _col12 (type: timestamp), _col13 (type: timestamp), _col14 (type: timestamp), _col15 (type: timestamp), _col16 (type: timestamp) Execution mode: vectorized Map Vectorization: @@ -293,10 +293,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int), VALUE._col8 (type: boolean), VALUE._col9 (type: timestamp), VALUE._col10 (type: timestamp), VALUE._col11 (type: timestamp), VALUE._col12 (type: timestamp), VALUE._col13 (type: timestamp), VALUE._col14 (type: timestamp), VALUE._col15 (type: timestamp) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16 - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -446,7 +446,7 @@ STAGE PLANS: Map
Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -457,7 +457,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [5, 6, 7, 8, 9, 10, 11, 12, 13] selectExpressions: VectorUDFUnixTimeStampString(col 2:string) -> 5:bigint, VectorUDFYearString(col 2:string, fieldStart 0, fieldLength 4) -> 6:int, VectorUDFMonthString(col 2:string, fieldStart 5, fieldLength 2) -> 7:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 8:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 9:int, VectorUDFWeekOfYearString(col 2:string) -> 10:int, VectorUDFHourString(col 2:string, fieldStart 11, fieldLength 2) -> 11:int, VectorUDFMinuteString(col 2:string, fieldStart 14, fieldLength 2) -> 12:int, VectorUDFSecondString(col 2:string, fieldStart 17, fieldLength 2) -> 13:int - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + @@ -466,7 +466,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int) Execution mode: vectorized Map Vectorization: @@ -486,10 +486,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -623,7 +623,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -634,7 +634,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [7, 8, 9, 10, 11, 12, 13, 14, 15] selectExpressions: LongColEqualLongColumn(col 5:bigint, col 6:bigint)(children: VectorUDFUnixTimeStampTimestamp(col 1:timestamp) -> 5:bigint, VectorUDFUnixTimeStampString(col 2:string) -> 6:bigint) -> 7:boolean, LongColEqualLongColumn(col 5:int, col 
6:int)(children: VectorUDFYearTimestamp(col 1:timestamp, field YEAR) -> 5:int, VectorUDFYearString(col 2:string, fieldStart 0, fieldLength 4) -> 6:int) -> 8:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFMonthTimestamp(col 1:timestamp, field MONTH) -> 5:int, VectorUDFMonthString(col 2:string, fieldStart 5, fieldLength 2) -> 6:int) -> 9:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 5:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 6:int) -> 10:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFDayOfMonthTimestamp(col 1:timestamp, field DAY_OF_MONTH) -> 5:int, VectorUDFDayOfMonthString(col 2:string, fieldStart 8, fieldLength 2) -> 6:int) -> 11:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFWeekOfYearTimestamp(col 1:timestamp, field WEEK_OF_YEAR) -> 5:int, VectorUDFWeekOfYearString(col 2:string) -> 6:int) -> 12:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFHourTimestamp(col 1:timestamp, field HOUR_OF_DAY) -> 5:int, VectorUDFHourString(col 2:string, fieldStart 11, fieldLength 2) -> 6:int) -> 13:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFMinuteTimestamp(col 1:timestamp, field MINUTE) -> 5:int, VectorUDFMinuteString(col 2:string, fieldStart 14, fieldLength 2) -> 6:int) -> 14:boolean, LongColEqualLongColumn(col 5:int, col 6:int)(children: VectorUDFSecondTimestamp(col 1:timestamp, field SECOND) -> 5:int, VectorUDFSecondString(col 2:string, fieldStart 17, fieldLength 2) -> 6:int) -> 15:boolean - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: boolean) sort order: + @@ -643,7 +643,7 @@ STAGE PLANS: native: false nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean) Execution mode: vectorized Map Vectorization: @@ -663,10 +663,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: boolean), VALUE._col0 (type: boolean), VALUE._col1 (type: boolean), VALUE._col2 (type: boolean), VALUE._col3 (type: boolean), VALUE._col4 (type: boolean), VALUE._col5 (type: boolean), VALUE._col6 (type: boolean), VALUE._col7 (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -916,7 +916,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -926,7 +926,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1] - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: min(ctimestamp1), max(ctimestamp1), count(ctimestamp1), count() Group By Vectorization: @@ -1023,7 +1023,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -1033,7 +1033,7 @@ STAGE PLANS: className: VectorSelectOperator native: true projectedOutputColumnNums: [1] - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(ctimestamp1) Group By Vectorization: @@ -1142,7 +1142,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE TableScan Vectorization: native: true Select Operator @@ -1153,7 +1153,7 @@ STAGE PLANS: native: true projectedOutputColumnNums: [1, 5, 8] selectExpressions: CastTimestampToDouble(col 1:timestamp) -> 5:double, DoubleColMultiplyDoubleColumn(col 6:double, col 7:double)(children: CastTimestampToDouble(col 1:timestamp) -> 6:double, CastTimestampToDouble(col 1:timestamp) -> 7:double) -> 8:double - Statistics: Num rows: 52 Data size: 3515 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 52 Data size: 3179 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(_col0), count(_col0), sum(_col2), sum(_col1) Group By Vectorization: http://git-wip-us.apache.org/repos/asf/hive/blob/812d57f8/standalone-metastore/pom.xml ---------------------------------------------------------------------- diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml index a4e68ae..9bb318c 100644 --- a/standalone-metastore/pom.xml +++ b/standalone-metastore/pom.xml @@ -80,7 +80,7 @@ <libthrift.version>0.9.3</libthrift.version> <log4j2.version>2.8.2</log4j2.version> <mockito-all.version>1.10.19</mockito-all.version> - <orc.version>1.4.3</orc.version> + <orc.version>1.5.0</orc.version> <protobuf.version>2.5.0</protobuf.version> <sqlline.version>1.3.0</sqlline.version> <storage-api.version>2.6.1-SNAPSHOT</storage-api.version>
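Note on the pom.xml hunk above: bumping the <orc.version> property to 1.5.0 re-resolves, via Maven property substitution, every ORC artifact in this module that references it. A minimal sketch of how such a property is typically consumed (the concrete dependency entries in standalone-metastore/pom.xml are outside this diff, so the artifactId below is illustrative only):

    <dependency>
      <groupId>org.apache.orc</groupId>
      <artifactId>orc-core</artifactId>
      <version>${orc.version}</version>
    </dependency>

With the property changed in one place, all such entries pick up ORC 1.5.0 on the next build; the Data size changes in the .q.out golden files above are the regenerated test output committed alongside this upgrade.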