Repository: hive
Updated Branches:
  refs/heads/branch-3 2f02f199e -> 94c945ef0
HIVE-20379: Rewriting with partitioned materialized views may reference wrong column (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/94c945ef
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/94c945ef
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/94c945ef

Branch: refs/heads/branch-3
Commit: 94c945ef0d9ec23d7b851f622e47caf4744a9977
Parents: 2f02f19
Author: Jesus Camacho Rodriguez <[email protected]>
Authored: Tue Aug 14 13:14:39 2018 -0700
Committer: Jesus Camacho Rodriguez <[email protected]>
Committed: Tue Aug 14 13:23:15 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |  33 +---
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 172 ++++++++++-------
 .../materialized_view_rewrite_part_2.q          |  12 +-
 .../llap/materialized_view_partitioned.q.out    |   4 +-
 .../llap/materialized_view_partitioned_3.q.out  |   2 +-
 .../llap/materialized_view_rewrite_part_1.q.out | 190 +++++++++++--------
 .../llap/materialized_view_rewrite_part_2.q.out | 100 ++++++----
 7 files changed, 291 insertions(+), 222 deletions(-)
----------------------------------------------------------------------
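The failure mode, sketched with the partition_mv_1 example from the golden files in this commit (the table layout comes from those files; the DDL form is an illustrative assumption): Hive stores a partitioned materialized view with its partition columns moved to the end of the table schema, so the expanded query recorded for the MV must project non-partition columns first and partition columns last, or query rewriting can bind a reference to the wrong column.

    -- From materialized_view_partitioned.q.out below: key is the MV's
    -- partition column, value its only regular column (DDL assumed).
    CREATE MATERIALIZED VIEW partition_mv_1 PARTITIONED ON (key) AS
    SELECT value, key FROM src_txn WHERE key > 200 AND key < 250;

    -- Stored MV schema: columns (value string), partition columns (key string).
    -- Before this fix the recorded expanded query was not re-projected to match
    -- that order, so rewriting against the MV could reference the wrong column.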

http://git-wip-us.apache.org/repos/asf/hive/blob/94c945ef/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 36b685c..d2f04d6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -488,32 +488,12 @@ public class CalcitePlanner extends SemanticAnalyzer {
       }
 
       // 2. Regen OP plan from optimized AST
-      if (cboCtx.type == PreCboCtx.Type.VIEW && !materializedView) {
+      if (cboCtx.type == PreCboCtx.Type.VIEW) {
         try {
-          handleCreateViewDDL(newAST);
+          viewSelect = handleCreateViewDDL(newAST);
         } catch (SemanticException e) {
           throw new CalciteViewSemanticException(e.getMessage());
         }
-      } else if (cboCtx.type == PreCboCtx.Type.VIEW && materializedView) {
-        // Store text of the ORIGINAL QUERY
-        String originalText = ctx.getTokenRewriteStream().toString(
-            cboCtx.nodeOfInterest.getTokenStartIndex(),
-            cboCtx.nodeOfInterest.getTokenStopIndex());
-        unparseTranslator.applyTranslations(ctx.getTokenRewriteStream());
-        String expandedText = ctx.getTokenRewriteStream().toString(
-            cboCtx.nodeOfInterest.getTokenStartIndex(),
-            cboCtx.nodeOfInterest.getTokenStopIndex());
-        // Redo create-table/view analysis, because it's not part of
-        // doPhase1.
-        // Use the REWRITTEN AST
-        init(false);
-        setAST(newAST);
-        newAST = reAnalyzeViewAfterCbo(newAST);
-        createVwDesc.setViewOriginalText(originalText);
-        createVwDesc.setViewExpandedText(expandedText);
-        viewSelect = newAST;
-        viewsExpanded = new ArrayList<>();
-        viewsExpanded.add(createVwDesc.getViewName());
       } else if (cboCtx.type == PreCboCtx.Type.CTAS) {
         // CTAS
         init(false);
@@ -631,19 +611,20 @@ public class CalcitePlanner extends SemanticAnalyzer {
     return sinkOp;
   }
 
-  private void handleCreateViewDDL(ASTNode newAST) throws SemanticException {
+  private ASTNode handleCreateViewDDL(ASTNode ast) throws SemanticException {
     saveViewDefinition();
     String originalText = createVwDesc.getViewOriginalText();
     String expandedText = createVwDesc.getViewExpandedText();
     List<FieldSchema> schema = createVwDesc.getSchema();
     List<FieldSchema> partitionColumns = createVwDesc.getPartCols();
     init(false);
-    setAST(newAST);
-    newAST = reAnalyzeViewAfterCbo(newAST);
+    setAST(ast);
+    ASTNode newAST = reAnalyzeViewAfterCbo(ast);
     createVwDesc.setViewOriginalText(originalText);
     createVwDesc.setViewExpandedText(expandedText);
     createVwDesc.setSchema(schema);
     createVwDesc.setPartCols(partitionColumns);
+    return newAST;
   }
 
   /*
@@ -1741,7 +1722,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       // if it is to create view, we do not use table alias
       resultSchema = SemanticAnalyzer.convertRowSchemaToResultSetSchema(
           relToHiveRR.get(calciteGenPlan),
-          getQB().isView() ? false : HiveConf.getBoolVar(conf,
+          getQB().isView() || getQB().isMaterializedView() ? false : HiveConf.getBoolVar(conf,
              HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
     } catch (SemanticException e) {
       semanticException = e;
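One detail in the last hunk above: the result schema is now derived without unique column names for materialized views as well as plain views. hive.resultset.use.unique.column.names (HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES) prefixes result-set column labels with the table alias, and such labels must not leak into a stored view schema. A hedged illustration (the session output shown is a sketch):

    SET hive.resultset.use.unique.column.names=true;
    SELECT value, key FROM src_txn;
    -- With the setting on, result columns come back labeled src_txn.value,
    -- src_txn.key. A CREATE [MATERIALIZED] VIEW must record plain names
    -- (value, key) instead, so CBO skips the unique-name decoration for
    -- both kinds of view when deriving the result schema.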

http://git-wip-us.apache.org/repos/asf/hive/blob/94c945ef/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 2d369f6..7a33b25 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -12608,86 +12608,120 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       String expandedText = ctx.getTokenRewriteStream().toString(
           viewSelect.getTokenStartIndex(), viewSelect.getTokenStopIndex());
-      if (imposedSchema != null) {
-        // Merge the names from the imposed schema into the types
-        // from the derived schema.
-        StringBuilder sb = new StringBuilder();
-        sb.append("SELECT ");
-        int n = derivedSchema.size();
-        for (int i = 0; i < n; ++i) {
-          if (i > 0) {
+      if (createVwDesc.isMaterialized()) {
+        if (createVwDesc.getPartColNames() != null) {
+          // If we are creating a materialized view and it has partition columns,
+          // we may need to reorder the column projection in the expanded query.
+          // The reason is that Hive assumes that the partition columns are at the
+          // end of the MV schema, and if we do not do this, we will have a mismatch
+          // between the SQL query for the MV and the MV itself.
+          boolean first = true;
+          StringBuilder sb = new StringBuilder();
+          sb.append("SELECT ");
+          for (int i = 0; i < derivedSchema.size(); ++i) {
+            FieldSchema fieldSchema = derivedSchema.get(i);
+            if (!createVwDesc.getPartColNames().contains(fieldSchema.getName())) {
+              if (first) {
+                first = false;
+              } else {
+                sb.append(", ");
+              }
+              sb.append(HiveUtils.unparseIdentifier(fieldSchema.getName(), conf));
+            }
+          }
+          for (String partColName : createVwDesc.getPartColNames()) {
             sb.append(", ");
+            sb.append(HiveUtils.unparseIdentifier(partColName, conf));
           }
-          FieldSchema fieldSchema = derivedSchema.get(i);
-          // Modify a copy, not the original
-          fieldSchema = new FieldSchema(fieldSchema);
-          // TODO: there's a potential problem here if some table uses external schema like Avro,
-          //       with a very large type name. It seems like the view does not derive the SerDe from
-          //       the table, so it won't be able to just get the type from the deserializer like the
-          //       table does; we won't be able to properly store the type in the RDBMS metastore.
-          //       Not sure if these large cols could be in resultSchema. Ignore this for now 0_o
-          derivedSchema.set(i, fieldSchema);
-          sb.append(HiveUtils.unparseIdentifier(fieldSchema.getName(), conf));
-          sb.append(" AS ");
-          String imposedName = imposedSchema.get(i).getName();
-          sb.append(HiveUtils.unparseIdentifier(imposedName, conf));
-          fieldSchema.setName(imposedName);
-          // We don't currently allow imposition of a type
-          fieldSchema.setComment(imposedSchema.get(i).getComment());
-        }
-        sb.append(" FROM (");
-        sb.append(expandedText);
-        sb.append(") ");
-        sb.append(HiveUtils.unparseIdentifier(createVwDesc.getViewName(), conf));
-        expandedText = sb.toString();
-      }
-
-      if (createVwDesc.getPartColNames() != null) {
-        // Make sure all partitioning columns referenced actually
-        // exist and are in the correct order at the end
-        // of the list of columns produced by the view. Also move the field
-        // schema descriptors from derivedSchema to the partitioning key
-        // descriptor.
-        List<String> partColNames = createVwDesc.getPartColNames();
-        if (partColNames.size() > derivedSchema.size()) {
-          throw new SemanticException(
-              ErrorMsg.VIEW_PARTITION_MISMATCH.getMsg());
+          sb.append(" FROM (");
+          sb.append(expandedText);
+          sb.append(") ");
+          sb.append(HiveUtils.unparseIdentifier(createVwDesc.getViewName(), conf));
+          expandedText = sb.toString();
         }
-
-        // Get the partition columns from the end of derivedSchema.
-        List<FieldSchema> partitionColumns = derivedSchema.subList(
-            derivedSchema.size() - partColNames.size(),
-            derivedSchema.size());
-
-        // Verify that the names match the PARTITIONED ON clause.
-        Iterator<String> colNameIter = partColNames.iterator();
-        Iterator<FieldSchema> schemaIter = partitionColumns.iterator();
-        while (colNameIter.hasNext()) {
-          String colName = colNameIter.next();
-          FieldSchema fieldSchema = schemaIter.next();
-          if (!fieldSchema.getName().equals(colName)) {
+      } else {
+        if (imposedSchema != null) {
+          // Merge the names from the imposed schema into the types
+          // from the derived schema.
+          StringBuilder sb = new StringBuilder();
+          sb.append("SELECT ");
+          int n = derivedSchema.size();
+          for (int i = 0; i < n; ++i) {
+            if (i > 0) {
+              sb.append(", ");
+            }
+            FieldSchema fieldSchema = derivedSchema.get(i);
+            // Modify a copy, not the original
+            fieldSchema = new FieldSchema(fieldSchema);
+            // TODO: there's a potential problem here if some table uses external schema like Avro,
+            //       with a very large type name. It seems like the view does not derive the SerDe from
+            //       the table, so it won't be able to just get the type from the deserializer like the
+            //       table does; we won't be able to properly store the type in the RDBMS metastore.
+            //       Not sure if these large cols could be in resultSchema. Ignore this for now 0_o
+            derivedSchema.set(i, fieldSchema);
+            sb.append(HiveUtils.unparseIdentifier(fieldSchema.getName(), conf));
+            sb.append(" AS ");
+            String imposedName = imposedSchema.get(i).getName();
+            sb.append(HiveUtils.unparseIdentifier(imposedName, conf));
+            fieldSchema.setName(imposedName);
+            // We don't currently allow imposition of a type
+            fieldSchema.setComment(imposedSchema.get(i).getComment());
+          }
+          sb.append(" FROM (");
+          sb.append(expandedText);
+          sb.append(") ");
+          sb.append(HiveUtils.unparseIdentifier(createVwDesc.getViewName(), conf));
+          expandedText = sb.toString();
+        }
+
+        if (createVwDesc.getPartColNames() != null) {
+          // Make sure all partitioning columns referenced actually
+          // exist and are in the correct order at the end
+          // of the list of columns produced by the view. Also move the field
+          // schema descriptors from derivedSchema to the partitioning key
+          // descriptor.
+          List<String> partColNames = createVwDesc.getPartColNames();
+          if (partColNames.size() > derivedSchema.size()) {
             throw new SemanticException(
                 ErrorMsg.VIEW_PARTITION_MISMATCH.getMsg());
           }
-        }
 
-        // Boundary case: require at least one non-partitioned column
-        // for consistency with tables.
-        if (partColNames.size() == derivedSchema.size()) {
-          throw new SemanticException(
-              ErrorMsg.VIEW_PARTITION_TOTAL.getMsg());
-        }
+          // Get the partition columns from the end of derivedSchema.
+          List<FieldSchema> partitionColumns = derivedSchema.subList(
+              derivedSchema.size() - partColNames.size(),
+              derivedSchema.size());
+
+          // Verify that the names match the PARTITIONED ON clause.
+          Iterator<String> colNameIter = partColNames.iterator();
+          Iterator<FieldSchema> schemaIter = partitionColumns.iterator();
+          while (colNameIter.hasNext()) {
+            String colName = colNameIter.next();
+            FieldSchema fieldSchema = schemaIter.next();
+            if (!fieldSchema.getName().equals(colName)) {
+              throw new SemanticException(
+                  ErrorMsg.VIEW_PARTITION_MISMATCH.getMsg());
+            }
+          }
 
-        // Now make a copy.
-        createVwDesc.setPartCols(
-            new ArrayList<FieldSchema>(partitionColumns));
+          // Boundary case: require at least one non-partitioned column
+          // for consistency with tables.
+          if (partColNames.size() == derivedSchema.size()) {
+            throw new SemanticException(
+                ErrorMsg.VIEW_PARTITION_TOTAL.getMsg());
+          }
 
-        // Finally, remove the partition columns from the end of derivedSchema.
-        // (Clearing the subList writes through to the underlying
-        // derivedSchema ArrayList.)
-        partitionColumns.clear();
+          // Now make a copy.
+          createVwDesc.setPartCols(
+              new ArrayList<FieldSchema>(partitionColumns));
+
+          // Finally, remove the partition columns from the end of derivedSchema.
+          // (Clearing the subList writes through to the underlying
+          // derivedSchema ArrayList.)
+          partitionColumns.clear();
+        }
       }
+      // Set schema and expanded text for the view
       createVwDesc.setSchema(derivedSchema);
       createVwDesc.setViewExpandedText(expandedText);
     }
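The reordering branch above is what produces the new expanded texts in the golden files below. For partition_mv_1 the original query is wrapped in a subquery and re-projected so that non-partition columns come first and partition columns last, matching the stored MV schema (both texts are copied from the q.out diffs below; only line breaks added):

    -- Expanded text before the fix: the defining query, fully qualified.
    SELECT `src_txn`.`value`, `src_txn`.`key`
    FROM `default`.`src_txn`
    WHERE `src_txn`.`key` > 200 AND `src_txn`.`key` < 250;

    -- Expanded text after the fix: wrapped and re-projected; `key`, the
    -- partition column, lands last no matter how the defining query orders it.
    SELECT `value`, `key`
    FROM (SELECT `src_txn`.`value`, `src_txn`.`key`
          FROM `default`.`src_txn`
          WHERE `src_txn`.`key` > 200 AND `src_txn`.`key` < 250) `default.partition_mv_1`;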

http://git-wip-us.apache.org/repos/asf/hive/blob/94c945ef/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_2.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_2.q
index 505f750..b2e6ebd 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_2.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_2.q
@@ -57,11 +57,11 @@ analyze table mv1_part_n0 compute statistics for columns;
 
 explain
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno;
 
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno;
 
 drop materialized view mv1_part_n0;
@@ -75,11 +75,11 @@ analyze table mv1_part_n0 compute statistics for columns;
 
 explain
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno;
 
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno;
 
 drop materialized view mv1_part_n0;
@@ -93,11 +93,11 @@ analyze table mv1_part_n0 compute statistics for columns;
 
 explain
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno;
 
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno;
 
 drop materialized view mv1_part_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/94c945ef/ql/src/test/results/clientpositive/llap/materialized_view_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_partitioned.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_partitioned.q.out
index d1db5b8..e68ec4a 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_partitioned.q.out
@@ -64,7 +64,7 @@ STAGE PLANS:
       Create View
         partition columns: key string
         columns: value string
-        expanded text: SELECT `src_txn`.`value`, `src_txn`.`key` FROM `default`.`src_txn` where `src_txn`.`key` > 200 and `src_txn`.`key` < 250
+        expanded text: SELECT `value`, `key` FROM (SELECT `src_txn`.`value`, `src_txn`.`key` FROM `default`.`src_txn` where `src_txn`.`key` > 200 and `src_txn`.`key` < 250) `default.partition_mv_1`
         name: default.partition_mv_1
         original text: SELECT value, key FROM src_txn where key > 200 and key < 250
         rewrite enabled: true
@@ -205,7 +205,7 @@ Sort Columns:        []
 
 # Materialized View Information
 Original Query:      SELECT value, key FROM src_txn where key > 200 and key < 250
-Expanded Query:      SELECT `src_txn`.`value`, `src_txn`.`key` FROM `default`.`src_txn` where `src_txn`.`key` > 200 and `src_txn`.`key` < 250
+Expanded Query:      SELECT `value`, `key` FROM (SELECT `src_txn`.`value`, `src_txn`.`key` FROM `default`.`src_txn` where `src_txn`.`key` > 200 and `src_txn`.`key` < 250) `default.partition_mv_1`
 Rewrite Enabled:     Yes
 Outdated for Rewriting: No
 PREHOOK: query: EXPLAIN

http://git-wip-us.apache.org/repos/asf/hive/blob/94c945ef/ql/src/test/results/clientpositive/llap/materialized_view_partitioned_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_partitioned_3.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_partitioned_3.q.out
index 726c660..3751ff1 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_partitioned_3.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_partitioned_3.q.out
@@ -82,7 +82,7 @@ STAGE PLANS:
       Create View
         partition columns: key string
         columns: value string
-        expanded text: SELECT `src_txn`.`value`, `src_txn`.`key` FROM `default`.`src_txn` where `src_txn`.`key` > 200 and `src_txn`.`key` < 250
+        expanded text: SELECT `value`, `key` FROM (SELECT `src_txn`.`value`, `src_txn`.`key` FROM `default`.`src_txn` where `src_txn`.`key` > 200 and `src_txn`.`key` < 250) `default.partition_mv_sdp`
         name: default.partition_mv_sdp
         original text: SELECT value, key FROM src_txn where key > 200 and key < 250
         rewrite enabled: true

http://git-wip-us.apache.org/repos/asf/hive/blob/94c945ef/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_part_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_part_1.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_part_1.q.out
index 7317ff4..29f9408 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_part_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_part_1.q.out
@@ -232,18 +232,18 @@ STAGE PLANS:
                   alias: default.mv1_part_n2
                   Statistics: Num rows: 3 Data size: 315 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((empid < 120) and name is not null) (type: boolean)
+                    predicate: (empid < 120) (type: boolean)
                     Statistics: Num rows: 3 Data size: 315 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
-                      expressions: empid (type: int), UDFToInteger(name) (type: int), CAST( salary AS varchar(256)) (type: varchar(256)), UDFToFloat(commission) (type: float), deptno (type: int)
+                      expressions: empid (type: int), name (type: varchar(256)), salary (type: float), commission (type: int), deptno (type: int)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                      Statistics: Num rows: 3 Data size: 1068 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 3 Data size: 315 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
-                        key expressions: _col1 (type: int)
+                        key expressions: _col4 (type: int)
                         sort order: +
-                        Map-reduce partition columns: _col1 (type: int)
-                        Statistics: Num rows: 3 Data size: 1068 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: int), _col2 (type: varchar(256)), _col3 (type: float), _col4 (type: int)
+                        Map-reduce partition columns: _col4 (type: int)
+                        Statistics: Num rows: 3 Data size: 315 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: int), _col1 (type: varchar(256)), _col2 (type: float), _col3 (type: int)
             Execution mode: llap
             LLAP IO: all inputs
         Map 3
@@ -261,6 +261,21 @@
                         Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 3 Data size: 291 Basic stats: COMPLETE Column stats: COMPLETE
                         value expressions: _col1 (type: varchar(256)), _col2 (type: int)
+                      Select Operator
+                        expressions: _col0 (type: int)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 3 Data size: 291 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
+                          Dynamic Partitioning Event Operator
+                            Target column: deptno (int)
+                            Target Input: default.mv1_part_n2
+                            Partition key expr: deptno
+                            Statistics: Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
+                            Target Vertex: Map 1
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2
@@ -270,17 +285,17 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col1 (type: int)
+                  0 _col4 (type: int)
                   1 _col0 (type: int)
-                outputColumnNames: _col0, _col2, _col3, _col4, _col5, _col6, _col7
-                Statistics: Num rows: 3 Data size: 1347 Basic stats: COMPLETE Column stats: COMPLETE
+                outputColumnNames: _col0, _col1, _col2, _col3, _col5, _col6, _col7
+                Statistics: Num rows: 3 Data size: 594 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
-                  expressions: _col5 (type: int), _col0 (type: int), _col2 (type: varchar(256)), _col3 (type: float), _col4 (type: int), _col6 (type: varchar(256)), _col7 (type: int)
+                  expressions: _col5 (type: int), _col0 (type: int), _col1 (type: varchar(256)), _col2 (type: float), _col3 (type: int), _col6 (type: varchar(256)), _col7 (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                  Statistics: Num rows: 3 Data size: 1347 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 594 Basic stats: COMPLETE Column stats: COMPLETE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 3 Data size: 1347 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 3 Data size: 594 Basic stats: COMPLETE Column stats: COMPLETE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -310,6 +325,9 @@ POSTHOOK: Input: default@emps_n30
 POSTHOOK: Input: default@mv1_part_n2
 POSTHOOK: Input: default@mv1_part_n2@deptno=10
 #### A masked pattern was here ####
+10 100 Bill 10000.0 1000 Sales 10
+10 110 Bill 10000.0 250 Sales 10
+10 110 Theodore 10000.0 250 Sales 10
 PREHOOK: query: drop materialized view mv1_part_n2
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@mv1_part_n2
@@ -386,19 +404,16 @@ STAGE PLANS:
                 TableScan
                   alias: default.mv1_part_n2
                   Statistics: Num rows: 5 Data size: 505 Basic stats: COMPLETE Column stats: PARTIAL
-                  Filter Operator
-                    predicate: name is not null (type: boolean)
+                  Select Operator
+                    expressions: name (type: varchar(256)), salary (type: float), commission (type: int), deptno (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col3
                     Statistics: Num rows: 5 Data size: 505 Basic stats: COMPLETE Column stats: PARTIAL
-                    Select Operator
-                      expressions: UDFToInteger(name) (type: int), CAST( salary AS varchar(256)) (type: varchar(256)), UDFToFloat(commission) (type: float), deptno (type: int)
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 5 Data size: 1760 Basic stats: COMPLETE Column stats: PARTIAL
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 5 Data size: 1760 Basic stats: COMPLETE Column stats: PARTIAL
-                        value expressions: _col1 (type: varchar(256)), _col2 (type: float), _col3 (type: int)
+                    Reduce Output Operator
+                      key expressions: _col3 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col3 (type: int)
+                      Statistics: Num rows: 5 Data size: 505 Basic stats: COMPLETE Column stats: PARTIAL
+                      value expressions: _col0 (type: varchar(256)), _col1 (type: float), _col2 (type: int)
             Execution mode: llap
             LLAP IO: all inputs
         Map 3
@@ -415,6 +430,21 @@
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+                      Select Operator
+                        expressions: _col0 (type: int)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          keys: _col0 (type: int)
+                          mode: hash
+                          outputColumnNames: _col0
+                          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                          Dynamic Partitioning Event Operator
+                            Target column: deptno (int)
+                            Target Input: default.mv1_part_n2
+                            Partition key expr: deptno
+                            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                            Target Vertex: Map 1
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2
@@ -424,21 +454,17 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col0 (type: int)
+                  0 _col3 (type: int)
                   1 _col0 (type: int)
-                outputColumnNames: _col1, _col2, _col3
-                Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
-                Select Operator
-                  expressions: _col1 (type: varchar(256)), _col2 (type: float), _col3 (type: int)
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 5 Data size: 1740 Basic stats: COMPLETE Column stats: PARTIAL
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 5 Data size: 485 Basic stats: COMPLETE Column stats: PARTIAL
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 5 Data size: 485 Basic stats: COMPLETE Column stats: PARTIAL
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
     Stage: Stage-0
       Fetch Operator
@@ -466,6 +492,11 @@ POSTHOOK: Input: default@mv1_part_n2
 POSTHOOK: Input: default@mv1_part_n2@deptno=10
 POSTHOOK: Input: default@mv1_part_n2@deptno=20
 #### A masked pattern was here ####
+Bill 10000.0 1000
+Bill 10000.0 250
+Eric 8000.0 500
+Sebastian 7000.0 NULL
+Theodore 10000.0 250
 PREHOOK: query: drop materialized view mv1_part_n2
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@mv1_part_n2
@@ -541,7 +572,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 106 Basic stats: COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 5 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 5 Data size: 526 Basic stats: COMPLETE Column stats: COMPLETE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -557,12 +588,12 @@ STAGE PLANS:
                   predicate: (empid < 150) (type: boolean)
                   Statistics: Num rows: 4 Data size: 420 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: empid (type: int), UDFToInteger(name) (type: int), CAST( salary AS varchar(256)) (type: varchar(256)), UDFToFloat(commission) (type: float), deptno (type: int)
+                    expressions: empid (type: int), deptno (type: int), name (type: varchar(256)), salary (type: float), commission (type: int)
                     outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                    Statistics: Num rows: 4 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 4 Data size: 420 Basic stats: COMPLETE Column stats: COMPLETE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 5 Data size: 1530 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 5 Data size: 526 Basic stats: COMPLETE Column stats: COMPLETE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -592,9 +623,9 @@ POSTHOOK: Input: default@emps_n30
 POSTHOOK: Input: default@mv1_part_n2
 POSTHOOK: Input: default@mv1_part_n2@deptno=10
 #### A masked pattern was here ####
-100 NULL 10000.0 1000.0 10
-110 NULL 10000.0 250.0 10
-110 NULL 10000.0 250.0 10
+100 10 Bill 10000.0 1000
+110 10 Bill 10000.0 250
+110 10 Theodore 10000.0 250
 150 10 Sebastian 7000.0 NULL
 200 20 Eric 8000.0 500
 PREHOOK: query: drop materialized view mv1_part_n2
@@ -669,7 +700,7 @@ STAGE PLANS:
         TableScan
           alias: default.mv1_part_n2
           Select Operator
-            expressions: CAST( salary AS varchar(256)) (type: varchar(256)), UDFToFloat(name) (type: float)
+            expressions: name (type: varchar(256)), salary (type: float)
             outputColumnNames: _col0, _col1
             ListSink
@@ -691,10 +722,10 @@ POSTHOOK: Input: default@mv1_part_n2@name=Eric
 POSTHOOK: Input: default@mv1_part_n2@name=Sebastian
 POSTHOOK: Input: default@mv1_part_n2@name=Theodore
 #### A masked pattern was here ####
-10000.0 NULL
-10000.0 NULL
-7000.0 NULL
-8000.0 NULL
+Bill 10000.0
+Eric 8000.0
+Sebastian 7000.0
+Theodore 10000.0
 PREHOOK: query: drop materialized view mv1_part_n2
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@mv1_part_n2
@@ -772,13 +803,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: default.mv1_part_n2
-                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 4 Data size: 1376 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: CAST( salary AS varchar(256)) (type: varchar(256))
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                    expressions: name (type: varchar(256))
+                    outputColumnNames: name
+                    Statistics: Num rows: 4 Data size: 1376 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
-                      keys: _col0 (type: varchar(256))
+                      keys: name (type: varchar(256))
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 2 Data size: 680 Basic stats: COMPLETE Column stats: COMPLETE
@@ -829,9 +860,10 @@ POSTHOOK: Input: default@mv1_part_n2@name=Eric
 POSTHOOK: Input: default@mv1_part_n2@name=Sebastian
 POSTHOOK: Input: default@mv1_part_n2@name=Theodore
 #### A masked pattern was here ####
-10000.0
-7000.0
-8000.0
+Bill
+Eric
+Sebastian
+Theodore
 PREHOOK: query: drop materialized view mv1_part_n2
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@mv1_part_n2
@@ -903,21 +935,21 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: default.mv1_part_n2
-                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 3 Data size: 1032 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: CAST( salary AS varchar(256)) (type: varchar(256))
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: PARTIAL
+                    expressions: name (type: varchar(256))
+                    outputColumnNames: name
+                    Statistics: Num rows: 3 Data size: 1032 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
-                      keys: _col0 (type: varchar(256))
+                      keys: name (type: varchar(256))
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: varchar(256))
                         sort order: +
                         Map-reduce partition columns: _col0 (type: varchar(256))
-                        Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: PARTIAL
+                        Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: all inputs
         Reducer 2
@@ -927,10 +959,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: varchar(256))
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 1 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -958,8 +990,9 @@ POSTHOOK: Input: default@mv1_part_n2@name=Bill
 POSTHOOK: Input: default@mv1_part_n2@name=Sebastian
 POSTHOOK: Input: default@mv1_part_n2@name=Theodore
 #### A masked pattern was here ####
-10000.0
-7000.0
+Bill
+Sebastian
+Theodore
 PREHOOK: query: drop materialized view mv1_part_n2
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@mv1_part_n2
@@ -1047,13 +1080,13 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: default.mv1_part_n2
-                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: CAST( salary AS varchar(256)) (type: varchar(256))
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                    expressions: name (type: varchar(256))
+                    outputColumnNames: name
+                    Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
-                      keys: _col0 (type: varchar(256))
+                      keys: name (type: varchar(256))
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 2 Data size: 680 Basic stats: COMPLETE Column stats: COMPLETE
@@ -1104,9 +1137,10 @@ POSTHOOK: Input: default@mv1_part_n2@name=Eric
 POSTHOOK: Input: default@mv1_part_n2@name=Sebastian
 POSTHOOK: Input: default@mv1_part_n2@name=Theodore
 #### A masked pattern was here ####
-10000.0
-7000.0
-8000.0
+Bill
+Eric
+Sebastian
+Theodore
 PREHOOK: query: drop materialized view mv1_part_n2
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@mv1_part_n2
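The rewrite_part_1 plan changes above all trace back to the same root cause: with the MV columns bound correctly, the scans project name, salary and commission with their declared types instead of the old wrong-column casts (UDFToInteger(name), CAST( salary AS varchar(256)), UDFToFloat(commission)), and the partitioned MV scan can now feed a Dynamic Partitioning Event Operator for runtime partition pruning. A hedged sketch of the kind of query being rewritten; the MV definition itself is not part of this diff, and the depts-side table name is an assumption:

    -- mv1_part_n2 is partitioned on deptno (see the deptno=10/deptno=20
    -- partition inputs above); its definition is inferred, not quoted.
    EXPLAIN
    SELECT emps_n30.empid, emps_n30.name, emps_n30.salary
    FROM emps_n30 JOIN depts_n30 USING (deptno)
    WHERE emps_n30.empid < 120;
    -- After the fix this is answered from default.mv1_part_n2 with predicate
    -- (empid < 120) and correctly typed columns, as in the first hunk above.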

http://git-wip-us.apache.org/repos/asf/hive/blob/94c945ef/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_part_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_part_2.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_part_2.q.out
index db2a054..8b8e9e6 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_part_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_part_2.q.out
@@ -209,12 +209,12 @@ POSTHOOK: Output: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
 PREHOOK: query: explain
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -228,28 +228,31 @@ STAGE PLANS:
         TableScan
           alias: default.mv1_part_n0
           Filter Operator
-            predicate: (deptno > 20) (type: boolean)
+            predicate: (deptno >= 20) (type: boolean)
             Select Operator
               expressions: empid (type: int)
               outputColumnNames: _col0
               ListSink
 
 PREHOOK: query: select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 PREHOOK: type: QUERY
 PREHOOK: Input: default@depts_n00
 PREHOOK: Input: default@emps_n00
 PREHOOK: Input: default@mv1_part_n0
+PREHOOK: Input: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
 POSTHOOK: query: select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@depts_n00
 POSTHOOK: Input: default@emps_n00
 POSTHOOK: Input: default@mv1_part_n0
+POSTHOOK: Input: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
+200
 PREHOOK: query: drop materialized view mv1_part_n0
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@mv1_part_n0
@@ -295,12 +298,12 @@ POSTHOOK: Output: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
 PREHOOK: query: explain
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -314,14 +317,14 @@ STAGE PLANS:
         TableScan
           alias: default.mv1_part_n0
           Filter Operator
-            predicate: (empid > 20) (type: boolean)
+            predicate: (empid >= 20) (type: boolean)
             Select Operator
               expressions: deptno (type: int)
               outputColumnNames: _col0
               ListSink
 
 PREHOOK: query: select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 PREHOOK: type: QUERY
 PREHOOK: Input: default@depts_n00
@@ -330,7 +333,7 @@ PREHOOK: Input: default@mv1_part_n0
 PREHOOK: Input: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
 POSTHOOK: query: select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@depts_n00
@@ -338,7 +341,7 @@ POSTHOOK: Input: default@emps_n00
 POSTHOOK: Input: default@mv1_part_n0
 POSTHOOK: Input: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
-20
+200
 PREHOOK: query: drop materialized view mv1_part_n0
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@mv1_part_n0
@@ -384,12 +387,12 @@ POSTHOOK: Output: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
 PREHOOK: query: explain
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -403,28 +406,31 @@ STAGE PLANS:
         TableScan
           alias: default.mv1_part_n0
           Filter Operator
-            predicate: (deptno > 20) (type: boolean)
+            predicate: (deptno >= 20) (type: boolean)
             Select Operator
               expressions: empid (type: int)
               outputColumnNames: _col0
               ListSink
 
 PREHOOK: query: select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 PREHOOK: type: QUERY
 PREHOOK: Input: default@depts_n00
 PREHOOK: Input: default@emps_n00
 PREHOOK: Input: default@mv1_part_n0
+PREHOOK: Input: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
 POSTHOOK: query: select empid from emps_n00
-join depts_n00 using (deptno) where depts_n00.deptno > 20
+join depts_n00 using (deptno) where depts_n00.deptno >= 20
 group by empid, depts_n00.deptno
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@depts_n00
 POSTHOOK: Input: default@emps_n00
 POSTHOOK: Input: default@mv1_part_n0
+POSTHOOK: Input: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
+200
 PREHOOK: query: drop materialized view mv1_part_n0
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@mv1_part_n0
@@ -494,10 +500,12 @@ STAGE PLANS:
       Processor Tree:
        TableScan
          alias: default.mv1_part_n0
-          Select Operator
-            expressions: empid (type: int)
-            outputColumnNames: _col0
-            ListSink
+          Filter Operator
+            predicate: (empid > 15) (type: boolean)
+            Select Operator
+              expressions: deptno (type: int)
+              outputColumnNames: _col0
+              ListSink
 
 PREHOOK: query: select depts_n00.deptno from depts_n00
 join emps_n00 using (deptno) where emps_n00.empid > 15
@@ -506,6 +514,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@depts_n00
 PREHOOK: Input: default@emps_n00
 PREHOOK: Input: default@mv1_part_n0
+PREHOOK: Input: default@mv1_part_n0@deptno=10
 PREHOOK: Input: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
 POSTHOOK: query: select depts_n00.deptno from depts_n00
@@ -515,9 +524,14 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@depts_n00
 POSTHOOK: Input: default@emps_n00
 POSTHOOK: Input: default@mv1_part_n0
+POSTHOOK: Input: default@mv1_part_n0@deptno=10
 POSTHOOK: Input: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
-200
+10
+10
+10
+10
+20
 PREHOOK: query: drop materialized view mv1_part_n0
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@mv1_part_n0
@@ -593,21 +607,24 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: default.mv1_part_n0
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: empid (type: int)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      keys: _col0 (type: int)
-                      mode: hash
+                  Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (empid > 15) (type: boolean)
+                    Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: deptno (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: all inputs
         Reducer 2
@@ -617,10 +634,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -639,6 +656,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@depts_n00
 PREHOOK: Input: default@emps_n00
 PREHOOK: Input: default@mv1_part_n0
+PREHOOK: Input: default@mv1_part_n0@deptno=10
 PREHOOK: Input: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
 POSTHOOK: query: select depts_n00.deptno from depts_n00
@@ -648,9 +666,11 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@depts_n00
 POSTHOOK: Input: default@emps_n00
 POSTHOOK: Input: default@mv1_part_n0
+POSTHOOK: Input: default@mv1_part_n0@deptno=10
 POSTHOOK: Input: default@mv1_part_n0@deptno=20
 #### A masked pattern was here ####
-200
+10
+20
 PREHOOK: query: drop materialized view mv1_part_n0
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 PREHOOK: Input: default@mv1_part_n0
@@ -745,10 +765,10 @@ STAGE PLANS:
                   alias: default.mv1_part_n0
                   Statistics: Num rows: 8 Data size: 1560 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: ((deptno = empid) and (name = name2)) (type: boolean)
+                    predicate: ((deptno = deptno2) and (name = name2)) (type: boolean)
                     Statistics: Num rows: 2 Data size: 390 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
-                      expressions: deptno2 (type: int)
+                      expressions: empid (type: int)
                       outputColumnNames: _col0
                       Statistics: Num rows: 2 Data size: 390 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator