http://git-wip-us.apache.org/repos/asf/hive/blob/975a49b6/ql/src/test/results/clientpositive/tez/metadataonly1.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/tez/metadataonly1.q.out b/ql/src/test/results/clientpositive/tez/metadataonly1.q.out deleted file mode 100644 index c79f34a..0000000 --- a/ql/src/test/results/clientpositive/tez/metadataonly1.q.out +++ /dev/null @@ -1,1948 +0,0 @@ -PREHOOK: query: CREATE TABLE TEST1(A INT, B DOUBLE) partitioned by (ds string) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@TEST1 -POSTHOOK: query: CREATE TABLE TEST1(A INT, B DOUBLE) partitioned by (ds string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@TEST1 -PREHOOK: query: explain extended select max(ds) from TEST1 -PREHOOK: type: QUERY -POSTHOOK: query: explain extended select max(ds) from TEST1 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: test1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: ds (type: string) - outputColumnNames: ds - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: max(ds) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - value expressions: _col0 (type: string) - auto parallelism: false - Reducer 2 - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: max(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0 - columns.types string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select max(ds) from TEST1 -PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -#### A masked pattern was here #### -POSTHOOK: query: select max(ds) from TEST1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -#### A masked pattern was here #### -NULL -PREHOOK: query: alter table TEST1 add partition (ds='1') -PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@test1 -POSTHOOK: query: alter table TEST1 add partition (ds='1') -POSTHOOK: type: ALTERTABLE_ADDPARTS 
-POSTHOOK: Output: default@test1 -POSTHOOK: Output: default@test1@ds=1 -PREHOOK: query: explain extended select max(ds) from TEST1 -PREHOOK: type: QUERY -POSTHOOK: query: explain extended select max(ds) from TEST1 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: test1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: ds (type: string) - outputColumnNames: ds - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: max(ds) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - value expressions: _col0 (type: string) - auto parallelism: false - Path -> Alias: - nullscan://null/default.test1/part_ds=1_ [test1] - Path -> Partition: - nullscan://null/default.test1/part_ds=1_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - numFiles 0 - numRows 0 - partition_columns ds - partition_columns.types string - rawDataSize 0 - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - partition_columns ds - partition_columns.types string - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 - name: default.test1 - Truncated Path -> Alias: - nullscan://null/default.test1/part_ds=1_ [test1] - Reducer 2 - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: max(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0 - columns.types string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - 
serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select max(ds) from TEST1 -PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test1@ds=1 -#### A masked pattern was here #### -POSTHOOK: query: select max(ds) from TEST1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test1@ds=1 -#### A masked pattern was here #### -1 -PREHOOK: query: explain extended select count(distinct ds) from TEST1 -PREHOOK: type: QUERY -POSTHOOK: query: explain extended select count(distinct ds) from TEST1 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: test1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: ds (type: string) - outputColumnNames: ds - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count(DISTINCT ds) - keys: ds (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - auto parallelism: false - Path -> Alias: - nullscan://null/default.test1/part_ds=1_ [test1] - Path -> Partition: - nullscan://null/default.test1/part_ds=1_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - numFiles 0 - numRows 0 - partition_columns ds - partition_columns.types string - rawDataSize 0 - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - partition_columns ds - partition_columns.types string - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 - name: default.test1 - Truncated Path -> Alias: - nullscan://null/default.test1/part_ds=1_ [test1] - Reducer 2 - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(DISTINCT 
KEY._col0:0._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0 - columns.types bigint - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(distinct ds) from TEST1 -PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test1@ds=1 -#### A masked pattern was here #### -POSTHOOK: query: select count(distinct ds) from TEST1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test1@ds=1 -#### A masked pattern was here #### -1 -PREHOOK: query: explain extended select count(ds) from TEST1 -PREHOOK: type: QUERY -POSTHOOK: query: explain extended select count(ds) from TEST1 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: test1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: ds (type: string) - outputColumnNames: ds - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count(ds) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - value expressions: _col0 (type: bigint) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: ds=1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - numFiles 0 - numRows 0 - partition_columns ds - partition_columns.types string - rawDataSize 0 - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - 
columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - partition_columns ds - partition_columns.types string - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 - name: default.test1 - Truncated Path -> Alias: - /test1/ds=1 [test1] - Reducer 2 - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0 - columns.types bigint - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(ds) from TEST1 -PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test1@ds=1 -#### A masked pattern was here #### -POSTHOOK: query: select count(ds) from TEST1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test1@ds=1 -#### A masked pattern was here #### -0 -PREHOOK: query: alter table TEST1 add partition (ds='2') -PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@test1 -POSTHOOK: query: alter table TEST1 add partition (ds='2') -POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@test1 -POSTHOOK: Output: default@test1@ds=2 -PREHOOK: query: explain extended -select count(*) from TEST1 a2 join (select max(ds) m from TEST1) b on a2.ds=b.m -PREHOOK: type: QUERY -POSTHOOK: query: explain extended -select count(*) from TEST1 a2 join (select max(ds) m from TEST1) b on a2.ds=b.m -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 5 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (SIMPLE_EDGE) - Reducer 5 <- Map 4 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a2 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: ds (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - tag: 0 - auto parallelism: true - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked 
pattern was here #### - Partition - base file name: ds=1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - numFiles 0 - numRows 0 - partition_columns ds - partition_columns.types string - rawDataSize 0 - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - partition_columns ds - partition_columns.types string - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 - name: default.test1 -#### A masked pattern was here #### - Partition - base file name: ds=2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - numFiles 0 - numRows 0 - partition_columns ds - partition_columns.types string - rawDataSize 0 - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - partition_columns ds - partition_columns.types string - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 - name: default.test1 - Truncated Path -> Alias: - /test1/ds=1 [a2] - /test1/ds=2 [a2] - Map 4 - Map Operator Tree: - TableScan - alias: test1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: ds (type: string) - outputColumnNames: ds - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: max(ds) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - value expressions: _col0 
(type: string) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: ds=1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - numFiles 0 - numRows 0 - partition_columns ds - partition_columns.types string - rawDataSize 0 - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - partition_columns ds - partition_columns.types string - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 - name: default.test1 -#### A masked pattern was here #### - Partition - base file name: ds=2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - numFiles 0 - numRows 0 - partition_columns ds - partition_columns.types string - rawDataSize 0 - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - partition_columns ds - partition_columns.types string - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 - name: default.test1 - Truncated Path -> Alias: - /test1/ds=1 [test1] - /test1/ds=2 [test1] - Reducer 2 - Needs Tagging: false - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - Position of Big Table: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 
1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - value expressions: _col0 (type: bigint) - auto parallelism: false - Reducer 3 - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0 - columns.types bigint - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Reducer 5 - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: max(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - isSamplingPred: false - predicate: _col0 is not null (type: boolean) - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - tag: 1 - auto parallelism: true - Select Operator - expressions: _col0 (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: _col0 (type: string) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Dynamic Partitioning Event Operator - Target column: ds (string) - Target Input: a2 - Partition key expr: ds - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Target Vertex: Map 1 - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select count(*) from TEST1 a2 join (select max(ds) m from TEST1) b on a2.ds=b.m -PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test1@ds=1 -PREHOOK: Input: default@test1@ds=2 -#### A masked pattern was here #### -POSTHOOK: query: select count(*) from TEST1 a2 join (select max(ds) m from TEST1) b on a2.ds=b.m -POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test1@ds=1 -POSTHOOK: Input: default@test1@ds=2 -#### A masked pattern was here #### -0 -PREHOOK: query: CREATE TABLE TEST2(A INT, B DOUBLE) partitioned by (ds string, hr string) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@TEST2 -POSTHOOK: query: CREATE TABLE TEST2(A INT, B DOUBLE) partitioned by (ds string, hr string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@TEST2 -PREHOOK: query: alter table TEST2 add partition (ds='1', hr='1') -PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@test2 -POSTHOOK: query: alter 
table TEST2 add partition (ds='1', hr='1') -POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@test2 -POSTHOOK: Output: default@test2@ds=1/hr=1 -PREHOOK: query: alter table TEST2 add partition (ds='1', hr='2') -PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@test2 -POSTHOOK: query: alter table TEST2 add partition (ds='1', hr='2') -POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@test2 -POSTHOOK: Output: default@test2@ds=1/hr=2 -PREHOOK: query: alter table TEST2 add partition (ds='1', hr='3') -PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@test2 -POSTHOOK: query: alter table TEST2 add partition (ds='1', hr='3') -POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@test2 -POSTHOOK: Output: default@test2@ds=1/hr=3 -PREHOOK: query: explain extended select ds, count(distinct hr) from TEST2 group by ds -PREHOOK: type: QUERY -POSTHOOK: query: explain extended select ds, count(distinct hr) from TEST2 group by ds -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: test2 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: ds (type: string), hr (type: string) - outputColumnNames: ds, hr - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count(DISTINCT hr) - keys: ds (type: string), hr (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - null sort order: aa - sort order: ++ - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - auto parallelism: true - Path -> Alias: - nullscan://null/default.test2/part_ds=1_hr=1_ [test2] - nullscan://null/default.test2/part_ds=1_hr=2_ [test2] - nullscan://null/default.test2/part_ds=1_hr=3_ [test2] - Path -> Partition: - nullscan://null/default.test2/part_ds=1_hr=1_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - hr 1 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - numFiles 0 - numRows 0 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 0 - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct test2 { i32 a, double b} - 
serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 - nullscan://null/default.test2/part_ds=1_hr=2_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - hr 2 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - numFiles 0 - numRows 0 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 0 - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 - nullscan://null/default.test2/part_ds=1_hr=3_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - hr 3 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - numFiles 0 - numRows 0 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 0 - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 - Truncated Path -> Alias: - nullscan://null/default.test2/part_ds=1_hr=1_ [test2] - nullscan://null/default.test2/part_ds=1_hr=2_ [test2] - nullscan://null/default.test2/part_ds=1_hr=3_ [test2] - Reducer 2 - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(DISTINCT KEY._col1:0._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 192 Basic 
stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types string:bigint - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select ds, count(distinct hr) from TEST2 group by ds -PREHOOK: type: QUERY -PREHOOK: Input: default@test2 -PREHOOK: Input: default@test2@ds=1/hr=1 -PREHOOK: Input: default@test2@ds=1/hr=2 -PREHOOK: Input: default@test2@ds=1/hr=3 -#### A masked pattern was here #### -POSTHOOK: query: select ds, count(distinct hr) from TEST2 group by ds -POSTHOOK: type: QUERY -POSTHOOK: Input: default@test2 -POSTHOOK: Input: default@test2@ds=1/hr=1 -POSTHOOK: Input: default@test2@ds=1/hr=2 -POSTHOOK: Input: default@test2@ds=1/hr=3 -#### A masked pattern was here #### -1 3 -PREHOOK: query: explain extended select ds, count(hr) from TEST2 group by ds -PREHOOK: type: QUERY -POSTHOOK: query: explain extended select ds, count(hr) from TEST2 group by ds -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: test2 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: ds (type: string), hr (type: string) - outputColumnNames: ds, hr - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count(hr) - keys: ds (type: string) - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - value expressions: _col1 (type: bigint) - auto parallelism: true - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: hr=1 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - hr 1 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - numFiles 0 - numRows 0 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 0 - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 -#### A masked pattern was here #### - Partition - base file name: hr=2 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - hr 2 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - numFiles 0 - numRows 0 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 0 - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 -#### A masked pattern was here #### - Partition - base file name: hr=3 - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - hr 3 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - numFiles 0 - numRows 0 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 0 - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A 
masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 - Truncated Path -> Alias: - /test2/ds=1/hr=1 [test2] - /test2/ds=1/hr=2 [test2] - /test2/ds=1/hr=3 [test2] - Reducer 2 - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types string:bigint - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select ds, count(hr) from TEST2 group by ds -PREHOOK: type: QUERY -PREHOOK: Input: default@test2 -PREHOOK: Input: default@test2@ds=1/hr=1 -PREHOOK: Input: default@test2@ds=1/hr=2 -PREHOOK: Input: default@test2@ds=1/hr=3 -#### A masked pattern was here #### -POSTHOOK: query: select ds, count(hr) from TEST2 group by ds -POSTHOOK: type: QUERY -POSTHOOK: Input: default@test2 -POSTHOOK: Input: default@test2@ds=1/hr=1 -POSTHOOK: Input: default@test2@ds=1/hr=2 -POSTHOOK: Input: default@test2@ds=1/hr=3 -#### A masked pattern was here #### -PREHOOK: query: explain extended select max(ds) from TEST1 -PREHOOK: type: QUERY -POSTHOOK: query: explain extended select max(ds) from TEST1 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: test1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: ds (type: string) - outputColumnNames: ds - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: max(ds) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - value expressions: _col0 (type: string) - auto parallelism: false - Path -> Alias: - nullscan://null/default.test1/part_ds=1_ [test1] - nullscan://null/default.test1/part_ds=2_ [test1] - Path -> Partition: - nullscan://null/default.test1/part_ds=1_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - 
columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - numFiles 0 - numRows 0 - partition_columns ds - partition_columns.types string - rawDataSize 0 - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - partition_columns ds - partition_columns.types string - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 - name: default.test1 - nullscan://null/default.test1/part_ds=2_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 2 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - numFiles 0 - numRows 0 - partition_columns ds - partition_columns.types string - rawDataSize 0 - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test1 - partition_columns ds - partition_columns.types string - serialization.ddl struct test1 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test1 - name: default.test1 - Truncated Path -> Alias: - nullscan://null/default.test1/part_ds=1_ [test1] - nullscan://null/default.test1/part_ds=2_ [test1] - Reducer 2 - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: max(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0 - columns.types string - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select max(ds) from TEST1 -PREHOOK: type: QUERY -PREHOOK: Input: default@test1 -PREHOOK: Input: default@test1@ds=1 -PREHOOK: Input: default@test1@ds=2 -#### A masked pattern was here #### -POSTHOOK: query: select max(ds) from TEST1 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@test1 -POSTHOOK: Input: default@test1@ds=1 -POSTHOOK: Input: default@test1@ds=2 -#### A masked pattern was here #### -2 -PREHOOK: query: select distinct ds from srcpart -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: select distinct ds from srcpart -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -2008-04-08 -2008-04-09 -PREHOOK: query: select min(ds),max(ds) from srcpart -PREHOOK: type: QUERY -PREHOOK: Input: default@srcpart -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -POSTHOOK: query: select min(ds),max(ds) from srcpart -POSTHOOK: type: QUERY -POSTHOOK: Input: default@srcpart -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -#### A masked pattern was here #### -2008-04-08 2008-04-09 -PREHOOK: query: -- HIVE-3594 URI encoding for temporary path -alter table TEST2 add partition (ds='01:10:10', hr='01') -PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@test2 -POSTHOOK: query: -- HIVE-3594 URI encoding for temporary path -alter table TEST2 add partition (ds='01:10:10', hr='01') -POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@test2 -POSTHOOK: Output: default@test2@ds=01%3A10%3A10/hr=01 -PREHOOK: query: alter table TEST2 add partition (ds='01:10:20', hr='02') -PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@test2 -POSTHOOK: query: alter table TEST2 add partition (ds='01:10:20', hr='02') -POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@test2 -POSTHOOK: Output: default@test2@ds=01%3A10%3A20/hr=02 -PREHOOK: query: explain extended select ds, count(distinct hr) from TEST2 group by ds -PREHOOK: type: QUERY -POSTHOOK: query: explain extended select ds, count(distinct hr) from TEST2 group by ds -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: test2 - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Select Operator - expressions: ds (type: string), 
hr (type: string) - outputColumnNames: ds, hr - Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count(DISTINCT hr) - keys: ds (type: string), hr (type: string) - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - null sort order: aa - sort order: ++ - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - auto parallelism: true - Path -> Alias: - nullscan://null/default.test2/part_ds=01_10_10_hr=01_ [test2] - nullscan://null/default.test2/part_ds=01_10_20_hr=02_ [test2] - nullscan://null/default.test2/part_ds=1_hr=1_ [test2] - nullscan://null/default.test2/part_ds=1_hr=2_ [test2] - nullscan://null/default.test2/part_ds=1_hr=3_ [test2] - Path -> Partition: - nullscan://null/default.test2/part_ds=01_10_10_hr=01_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 01:10:10 - hr 01 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - numFiles 0 - numRows 0 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 0 - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 - nullscan://null/default.test2/part_ds=01_10_20_hr=02_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 01:10:20 - hr 02 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - numFiles 0 - numRows 0 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 0 - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - 
partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 - nullscan://null/default.test2/part_ds=1_hr=1_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - hr 1 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - numFiles 0 - numRows 0 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 0 - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 - nullscan://null/default.test2/part_ds=1_hr=2_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - hr 2 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - numFiles 0 - numRows 0 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 0 - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 - nullscan://null/default.test2/part_ds=1_hr=3_ - Partition - input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - partition values: - ds 1 - hr 3 - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"} - bucket_count -1 - columns a,b - 
columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - numFiles 0 - numRows 0 - partition_columns ds/hr - partition_columns.types string:string - rawDataSize 0 - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 0 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.NullStructSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns a,b - columns.comments - columns.types int:double -#### A masked pattern was here #### - name default.test2 - partition_columns ds/hr - partition_columns.types string:string - serialization.ddl struct test2 { i32 a, double b} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.test2 - name: default.test2 - Truncated Path -> Alias: - nullscan://null/default.test2/part_ds=01_10_10_hr=01_ [test2] - nullscan://null/default.test2/part_ds=01_10_20_hr=02_ [test2] - nullscan://null/default.test2/part_ds=1_hr=1_ [test2] - nullscan://null/default.test2/part_ds=1_hr=2_ [test2] - nullscan://null/default.test2/part_ds=1_hr=3_ [test2] - Reducer 2 - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(DISTINCT KEY._col1:0._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types string:bigint - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: select ds, count(distinct hr) from TEST2 group by ds -PREHOOK: type: QUERY -PREHOOK: Input: default@test2 -PREHOOK: Input: default@test2@ds=01%3A10%3A10/hr=01 -PREHOOK: Input: default@test2@ds=01%3A10%3A20/hr=02 -PREHOOK: Input: d
<TRUNCATED>
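For reference: the deleted golden file records Hive's metadata-only optimization on Tez. Queries that touch only partition columns are answered from metastore metadata, which shows up in the plans above as nullscan:// paths read through OneNullRowInputFormat with NullStructSerDe instead of scans over partition data. A minimal HiveQL sketch of that behavior, assembled from the PREHOOK/POSTHOOK entries above (the inline result comments restate the outputs recorded in the diff):

    -- Fixtures, verbatim from the test: an empty partitioned table.
    CREATE TABLE TEST1(A INT, B DOUBLE) PARTITIONED BY (ds STRING);
    ALTER TABLE TEST1 ADD PARTITION (ds='1');

    -- Metadata-only: the plans above list nullscan:// paths, so these
    -- queries are answered without reading any partition files.
    SELECT max(ds) FROM TEST1;             -- 1
    SELECT count(DISTINCT ds) FROM TEST1;  -- 1

    -- Not metadata-only: count(ds) counts actual rows, so the plan
    -- falls back to TextInputFormat over /test1/ds=1 and yields 0
    -- for the empty partition.
    SELECT count(ds) FROM TEST1;           -- 0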
