This is an automated email from the ASF dual-hosted git repository.
krisztiankasa pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 7bff622689b HIVE-28050: Disable Incremental non aggregated
materialized view rebuild in presence of delete operations (Krisztian Kasa,
reviewed by Stamatis Zampetakis, Aman Sinha)
7bff622689b is described below
commit 7bff622689b022d65046ebdfb775b9821c4df635
Author: Krisztian Kasa <[email protected]>
AuthorDate: Mon Feb 12 08:17:42 2024 +0100
HIVE-28050: Disable Incremental non aggregated materialized view rebuild in
presence of delete operations (Krisztian Kasa, reviewed by Stamatis Zampetakis,
Aman Sinha)
---
.../AlterMaterializedViewRebuildAnalyzer.java | 109 +----
...veJoinInsertDeleteIncrementalRewritingRule.java | 212 --------
.../hadoop/hive/ql/parse/SemanticAnalyzer.java | 3 +-
.../materialized_view_create_rewrite_12.q | 36 ++
.../materialized_view_create_rewrite_5.q | 3 +
.../materialized_view_create_rewrite_8.q | 3 +
.../materialized_view_join_rebuild.q | 33 --
.../materialized_view_repeated_rebuild.q | 37 --
.../llap/materialized_view_create_rewrite_12.q.out | 211 ++++++++
.../llap/materialized_view_create_rewrite_5.q.out | 532 +++++++--------------
.../llap/materialized_view_create_rewrite_8.q.out | 255 ++++------
.../llap/materialized_view_join_rebuild.q.out | 108 -----
.../llap/materialized_view_repeated_rebuild.q.out | 134 ------
13 files changed, 509 insertions(+), 1167 deletions(-)
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java
index 3542ac06ab0..823e574cb40 100644
---
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java
+++
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java
@@ -44,17 +44,13 @@ import org.apache.hadoop.hive.ql.log.PerfLogger;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization;
import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
import
org.apache.hadoop.hive.ql.optimizer.calcite.HiveTezModelRelMetadataProvider;
import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
import
org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveInBetweenExpandRule;
-import
org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.ColumnPropagationException;
import
org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAggregateInsertDeleteIncrementalRewritingRule;
import
org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAggregateInsertIncrementalRewritingRule;
import
org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAggregatePartitionIncrementalRewritingRule;
import
org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveInsertOnlyScanWriteIdRule;
-import
org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveJoinInsertDeleteIncrementalRewritingRule;
import
org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveJoinInsertIncrementalRewritingRule;
import
org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializationRelMetadataProvider;
import
org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewRule;
@@ -322,8 +318,7 @@ public class AlterMaterializedViewRebuildAnalyzer extends
CalcitePlanner {
}
return applyAggregateInsertDeleteIncremental(basePlan,
mdProvider, executorProvider);
} else {
- return applyJoinInsertDeleteIncremental(
- basePlan, mdProvider, executorProvider, optCluster,
calcitePreMVRewritingPlan);
+ return calcitePreMVRewritingPlan;
}
} else {
return calcitePreMVRewritingPlan;
@@ -373,21 +368,6 @@ public class AlterMaterializedViewRebuildAnalyzer extends
CalcitePlanner {
}
}
- private RelNode applyJoinInsertDeleteIncremental(
- RelNode basePlan, RelMetadataProvider mdProvider, RexExecutor
executorProvider, RelOptCluster optCluster,
- RelNode calcitePreMVRewritingPlan) {
- basePlan = applyIncrementalRebuild(
- basePlan, mdProvider, executorProvider,
HiveJoinInsertDeleteIncrementalRewritingRule.INSTANCE);
- mvRebuildMode = MaterializationRebuildMode.JOIN_INSERT_DELETE_REBUILD;
- try {
- return new
HiveJoinInsertDeleteIncrementalRewritingRule.FilterPropagator(
- HiveRelFactories.HIVE_BUILDER.create(optCluster,
null)).propagate(basePlan);
- } catch (ColumnPropagationException ex) {
- LOG.warn("Exception while propagating column " +
VirtualColumn.ROWISDELETED.getName(), ex);
- return calcitePreMVRewritingPlan;
- }
- }
-
private RelNode applyJoinInsertIncremental(
RelNode basePlan, RelMetadataProvider mdProvider, RexExecutor
executorProvider) {
mvRebuildMode = MaterializationRebuildMode.JOIN_INSERT_REBUILD;
@@ -479,9 +459,6 @@ public class AlterMaterializedViewRebuildAnalyzer extends
CalcitePlanner {
case AGGREGATE_INSERT_DELETE_REBUILD:
fixUpASTAggregateInsertDeleteIncrementalRebuild(fixedAST,
getMaterializedViewASTBuilder());
return fixedAST;
- case JOIN_INSERT_DELETE_REBUILD:
- fixUpASTJoinInsertDeleteIncrementalRebuild(fixedAST,
getMaterializedViewASTBuilder());
- return fixedAST;
default:
throw new UnsupportedOperationException("No materialized view rebuild
exists for mode " + mvRebuildMode);
}
@@ -752,90 +729,6 @@ public class AlterMaterializedViewRebuildAnalyzer extends
CalcitePlanner {
destParent.insertChild(childIndex, newChild);
}
- private void fixUpASTJoinInsertDeleteIncrementalRebuild(ASTNode newAST,
MaterializedViewASTBuilder astBuilder)
- throws SemanticException {
- // Replace INSERT OVERWRITE by MERGE equivalent rewriting.
- // Here we need to do this complex AST rewriting that generates the same
plan
- // that a MERGE clause would generate because CBO does not support MERGE
yet.
- // TODO: Support MERGE as first class member in CBO to simplify this logic.
- // 1) Replace INSERT OVERWRITE by INSERT
- ASTNode insertNode = new ASTSearcher().simpleBreadthFirstSearch(
- newAST, HiveParser.TOK_QUERY, HiveParser.TOK_INSERT);
- ASTNode destinationNode = (ASTNode) insertNode.getChild(0);
- ASTNode newInsertInto = (ASTNode) ParseDriver.adaptor.create(
- HiveParser.TOK_INSERT_INTO, "TOK_INSERT_INTO");
- newInsertInto.addChildren(destinationNode.getChildren());
- ASTNode destinationParentNode = (ASTNode) destinationNode.getParent();
- int childIndex = destinationNode.childIndex;
- destinationParentNode.deleteChild(childIndex);
- destinationParentNode.insertChild(childIndex, newInsertInto);
- // 1.1) Extract name as we will need it afterwards:
- // TOK_DESTINATION TOK_TAB TOK_TABNAME <materialization_name>
- ASTNode materializationNode = new ASTSearcher().simpleBreadthFirstSearch(
- newInsertInto, HiveParser.TOK_INSERT_INTO, HiveParser.TOK_TAB,
HiveParser.TOK_TABNAME);
-
- ASTNode subqueryNodeInputROJ = new ASTSearcher().simpleBreadthFirstSearch(
- newAST, HiveParser.TOK_QUERY, HiveParser.TOK_FROM,
HiveParser.TOK_RIGHTOUTERJOIN,
- HiveParser.TOK_SUBQUERY);
- ASTNode selectNodeInputROJ = new ASTSearcher().simpleBreadthFirstSearch(
- subqueryNodeInputROJ, HiveParser.TOK_SUBQUERY,
HiveParser.TOK_QUERY,
- HiveParser.TOK_INSERT, HiveParser.TOK_SELECT);
- astBuilder.createAcidSortNodes(TableName.getDbTable(
- materializationNode.getChild(0).getText(),
- materializationNode.getChild(1).getText()))
- .forEach(astNode ->
ParseDriver.adaptor.addChild(selectNodeInputROJ, astNode));
-
- ASTNode whereClauseInInsert = findWhereClause(insertNode);
-
- // 2) Add filter condition to Insert
- // Modifying filter condition. The incremental rewriting rule generated an
OR
- // clause where first disjunct contains the condition for the DELETE
branch.
- // TOK_WHERE
- // or
- // . <- DISJUNCT FOR <DELETE>
- // TOK_TABLE_OR_COL
- // $hdt$_0
- // ROW__IS__DELETED
- // TOK_FUNCTION <- DISJUNCT FOR <INSERT>
- // isnull
- // .
- // TOK_TABLE_OR_COL
- // $hdt$_0
- // ROW__IS__DELETED
- if (whereClauseInInsert.getChild(0).getType() != HiveParser.KW_OR) {
- throw new SemanticException("OR clause expected below TOK_WHERE in
incremental rewriting");
- }
- // We bypass the OR clause and select the first disjunct
- int indexDelete;
- int indexInsert;
- if (whereClauseInInsert.getChild(0).getChild(0).getType() ==
HiveParser.KW_AND) {
- indexDelete = 0;
- indexInsert = 1;
- } else if (whereClauseInInsert.getChild(0).getChild(1).getType() ==
HiveParser.KW_AND) {
- indexDelete = 1;
- indexInsert = 0;
- } else {
- throw new SemanticException("Unexpected condition in incremental
rewriting");
- }
- ASTNode deletePredicate =
- (ASTNode)
ParseDriver.adaptor.dupTree(whereClauseInInsert.getChild(0).getChild(indexDelete));
- ASTNode newCondInInsert = (ASTNode)
whereClauseInInsert.getChild(0).getChild(indexInsert);
- ParseDriver.adaptor.setChild(whereClauseInInsert, 0, newCondInInsert);
-
- addDeleteBranch(insertNode, subqueryNodeInputROJ, deletePredicate,
astBuilder);
-
- // 3) Add sort node to delete branch
- ASTNode sortNode = astBuilder.createSortNodes(
- astBuilder.createAcidSortNodes((ASTNode)
subqueryNodeInputROJ.getChild(1)));
- ParseDriver.adaptor.addChild(insertNode.getParent().getChild(2), sortNode);
-
- // 4) Now we set some tree properties related to multi-insert
- // operation with INSERT/UPDATE
- ctx.setOperation(Context.Operation.MERGE);
- ctx.addDestNamePrefix(1, Context.DestClausePrefix.INSERT);
- ctx.addDestNamePrefix(2, Context.DestClausePrefix.DELETE);
- }
-
@Override
protected boolean allowOutputMultipleTimes() {
return true;
diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveJoinInsertDeleteIncrementalRewritingRule.java
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveJoinInsertDeleteIncrementalRewritingRule.java
deleted file mode 100644
index 10a9c5c2760..00000000000
---
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveJoinInsertDeleteIncrementalRewritingRule.java
+++ /dev/null
@@ -1,212 +0,0 @@
-package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views;/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptRuleCall;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.JoinRelType;
-import org.apache.calcite.rel.core.Union;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeField;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.tools.RelBuilder;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
-import org.apache.hadoop.hive.ql.parse.CalcitePlanner;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * This rule will perform a rewriting to prepare the plan for incremental
- * view maintenance in case there is no aggregation operator but some of the
- * source tables has delete operations, so we can avoid the INSERT OVERWRITE
and use a
- * MULTI INSERT statement instead: one insert branch for inserted rows
- * and another for inserting deleted rows to delete delta.
- * Since CBO plan does not contain the INSERT branches we focus on the SELECT
part of the plan in this rule.
- * See also {@link CalcitePlanner}
- *
- * FROM (select mv.ROW__ID, mv.a, mv.b, true as flag from mv) mv
- * RIGHT OUTER JOIN (SELECT _source_.ROW__IS__DELETED, _source_.a, _source_.b
FROM _source_) source
- * ON (mv.a <=> source.a AND mv.b <=> source.b)
- * INSERT INTO TABLE mv_delete_delta
- * SELECT mv.ROW__ID
- * WHERE source.ROW__IS__DELETED AND flag
- * INSERT INTO TABLE mv
- * SELECT source.a, source.b
- * WHERE NOT source.ROW__IS__DELETED
- * SORT BY mv.ROW__ID;
- */
-public class HiveJoinInsertDeleteIncrementalRewritingRule extends RelOptRule {
-
- public static final HiveJoinInsertDeleteIncrementalRewritingRule INSTANCE =
- new HiveJoinInsertDeleteIncrementalRewritingRule();
-
- private HiveJoinInsertDeleteIncrementalRewritingRule() {
- super(operand(Union.class, any()),
- HiveRelFactories.HIVE_BUILDER,
- "HiveJoinInsertDeleteIncrementalRewritingRule");
- }
-
- @Override
- public void onMatch(RelOptRuleCall call) {
- final Union union = call.rel(0);
- RexBuilder rexBuilder = union.getCluster().getRexBuilder();
- // First branch is query, second branch is MV
- // 1) First branch is query, second branch is MV
- final RelNode joinLeftInput = union.getInput(1);
- final RelNode joinRightInput = union.getInput(0);
-
- // 2) Build conditions for join and start adding
- // expressions for project operator
- List<RexNode> projExprs = new ArrayList<>();
- List<RexNode> joinConjs = new ArrayList<>();
- for (int leftPos = 0; leftPos <
joinLeftInput.getRowType().getFieldCount(); leftPos++) {
- RexNode leftRef = rexBuilder.makeInputRef(
-
joinLeftInput.getRowType().getFieldList().get(leftPos).getType(), leftPos);
- RexNode rightRef = rexBuilder.makeInputRef(
-
joinRightInput.getRowType().getFieldList().get(leftPos).getType(),
- leftPos + joinLeftInput.getRowType().getFieldCount());
-
- projExprs.add(rightRef);
-
-
joinConjs.add(rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_DISTINCT_FROM,
leftRef, rightRef));
- }
-
- RexNode joinCond = RexUtil.composeConjunction(rexBuilder, joinConjs);
-
- // 3) Build plan
- RelNode newNode = call.builder()
- .push(union.getInput(1))
- .push(union.getInput(0))
- .join(JoinRelType.RIGHT, joinCond)
- .project(projExprs)
- .build();
- call.transformTo(newNode);
- }
-
- public static class FilterPropagator extends HiveRowIsDeletedPropagator {
-
- private boolean foundTopRightJoin;
-
- public FilterPropagator(RelBuilder relBuilder) {
- super(relBuilder);
- }
-
- @Override
- public RelNode propagate(RelNode relNode) {
- foundTopRightJoin = false;
- return super.propagate(relNode);
- }
-
- @Override
- public RelNode visit(HiveProject project) {
- if (!foundTopRightJoin) {
- return visitChild(project, 0, project.getInput());
- }
-
- // continue traversal and propagate rowIsDeleted column
- return super.visit(project);
- }
-
- @Override
- public RelNode visit(HiveJoin join) {
- if (!foundTopRightJoin) {
- if (join.getJoinType() != JoinRelType.RIGHT) {
- // continue search for top Right Join node
- return visitChildren(join);
- }
-
- foundTopRightJoin = true;
- return createFilter(join);
- }
-
- // continue traversal and propagate rowIsDeleted column
- return super.visit(join);
- }
-
- private RelNode createFilter(HiveJoin join) {
- RexBuilder rexBuilder = relBuilder.getRexBuilder();
- // This should be a Scan on the MV
- RelNode leftInput = join.getLeft();
-
- // This branch is querying the rows should be inserted/deleted into the
view since the last rebuild.
- RelNode rightInput = join.getRight();
-
- RelNode tmpJoin = visitChild(join, 1, rightInput);
- RelNode newRightInput = tmpJoin.getInput(1);
-
- List<RexNode> leftProjects = new
ArrayList<>(leftInput.getRowType().getFieldCount() + 1);
- List<String> leftProjectNames = new
ArrayList<>(leftInput.getRowType().getFieldCount() + 1);
- for (int i = 0; i < leftInput.getRowType().getFieldCount(); ++i) {
- RelDataTypeField relDataTypeField =
leftInput.getRowType().getFieldList().get(i);
- leftProjects.add(rexBuilder.makeInputRef(relDataTypeField.getType(),
i));
- leftProjectNames.add(relDataTypeField.getName());
- }
- List<RexNode> projects = new ArrayList<>(leftProjects.size() +
newRightInput.getRowType().getFieldCount());
- projects.addAll(leftProjects);
- List<String> projectNames = new ArrayList<>(leftProjects.size() +
newRightInput.getRowType().getFieldCount());
- projectNames.addAll(leftProjectNames);
-
- leftProjects.add(rexBuilder.makeLiteral(true));
- leftProjectNames.add("flag");
-
- leftInput = relBuilder
- .push(leftInput)
- .project(leftProjects, leftProjectNames)
- .build();
-
- // Create input ref to flag. It is used in filter condition later.
- int flagIndex = leftProjects.size() - 1;
- RexNode flagNode = rexBuilder.makeInputRef(
- leftInput.getRowType().getFieldList().get(flagIndex).getType(),
flagIndex);
-
- // Create input ref to rowIsDeleteColumn. It is used in filter condition
later.
- RelDataType newRowType = newRightInput.getRowType();
- int rowIsDeletedIdx = newRowType.getFieldCount() - 1;
- RexNode rowIsDeleted = rexBuilder.makeInputRef(
- newRowType.getFieldList().get(rowIsDeletedIdx).getType(),
- leftInput.getRowType().getFieldCount() + rowIsDeletedIdx);
-
- RexNode deleteBranchFilter =
rexBuilder.makeCall(SqlStdOperatorTable.AND, flagNode, rowIsDeleted);
- RexNode insertBranchFilter =
rexBuilder.makeCall(SqlStdOperatorTable.NOT, rowIsDeleted);
-
- for (int i = 0; i < newRowType.getFieldCount() - 1; ++i) {
- RelDataTypeField relDataTypeField = newRowType.getFieldList().get(i);
- projects.add(rexBuilder.makeInputRef(relDataTypeField.getType(),
leftInput.getRowType().getFieldCount() + i));
- projectNames.add(relDataTypeField.getName());
- }
-
- RexNode newJoinCondition = new
InputRefShifter(leftInput.getRowType().getFieldCount() - 1, relBuilder)
- .apply(join.getCondition());
-
- // Create new Top Right Join and a Filter. The filter condition is used
in AlterMaterializedViewRebuildAnalyzer.fixUpASTJoinInsertDeleteIncrementalRebuild().
- return relBuilder
- .push(leftInput)
- .push(newRightInput)
- .join(join.getJoinType(), newJoinCondition)
- .filter(rexBuilder.makeCall(SqlStdOperatorTable.OR,
deleteBranchFilter, insertBranchFilter))
- .project(projects, projectNames)
- .build();
- }
- }
-}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 9bc8e03d164..441e19e8ee9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -15978,8 +15978,7 @@ public class SemanticAnalyzer extends
BaseSemanticAnalyzer {
INSERT_OVERWRITE_REBUILD,
AGGREGATE_INSERT_REBUILD,
AGGREGATE_INSERT_DELETE_REBUILD,
- JOIN_INSERT_REBUILD,
- JOIN_INSERT_DELETE_REBUILD
+ JOIN_INSERT_REBUILD
}
/**
diff --git
a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_12.q
b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_12.q
new file mode 100644
index 00000000000..5a159b7e4b8
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_12.q
@@ -0,0 +1,36 @@
+-- Test rebuild of materialized view without aggregate when source tables have
delete operations since last rebuild.
+-- Incremental rebuild is not available.
+
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table t1 (a int, b int) stored as orc TBLPROPERTIES
('transactional'='true');
+
+insert into t1 values
+(3, 3),
+(2, 1),
+(2, 2),
+(1, 2),
+(1, 1);
+
+CREATE MATERIALIZED VIEW mat1
+ TBLPROPERTIES ('transactional'='true') AS
+SELECT a
+FROM t1
+WHERE b < 10;
+
+delete from t1 where b = 2;
+
+explain
+alter materialized view mat1 rebuild;
+
+alter materialized view mat1 rebuild;
+
+explain cbo
+SELECT a
+FROM t1
+WHERE b < 10;
+
+SELECT a
+FROM t1
+WHERE b < 10;
diff --git
a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q
b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q
index 5a540250838..cdaf1abd091 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q
@@ -11,6 +11,8 @@ set hive.materializedview.rewriting=true;
create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int)
stored as orc TBLPROPERTIES ('transactional'='true');
insert into cmv_basetable_n6 values
+ (1, 'alfred', 10.30, 2),
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(2, 'bob', 3.14, 3),
(2, 'bonnie', 172342.2, 3),
@@ -20,6 +22,7 @@ insert into cmv_basetable_n6 values
create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d
int) stored as orc TBLPROPERTIES ('transactional'='true');
insert into cmv_basetable_2_n3 values
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(3, 'calvin', 978.76, 3);
diff --git
a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_8.q
b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_8.q
index 651bcb4319e..aef9cb9965c 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_8.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_8.q
@@ -7,6 +7,8 @@ set hive.materializedview.rewriting=true;
create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int)
stored as orc TBLPROPERTIES ('transactional'='true');
insert into cmv_basetable_n6 values
+ (1, 'alfred', 10.30, 2),
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(2, 'bob', 3.14, 3),
(2, 'bonnie', 172342.2, 3),
@@ -16,6 +18,7 @@ insert into cmv_basetable_n6 values
create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d
int) stored as orc TBLPROPERTIES ('transactional'='true',
'transactional_properties'='insert_only');
insert into cmv_basetable_2_n3 values
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(3, 'calvin', 978.76, 3);
diff --git
a/ql/src/test/queries/clientpositive/materialized_view_join_rebuild.q
b/ql/src/test/queries/clientpositive/materialized_view_join_rebuild.q
deleted file mode 100644
index b104c2b637c..00000000000
--- a/ql/src/test/queries/clientpositive/materialized_view_join_rebuild.q
+++ /dev/null
@@ -1,33 +0,0 @@
--- Test Incremental rebuild of materialized view without aggregate when source
tables have
--- delete operations since last rebuild.
--- The view projects only one column.
-
-set hive.support.concurrency=true;
-set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-
-create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int)
stored as orc TBLPROPERTIES ('transactional'='true');
-
-insert into cmv_basetable_n6 values
- (1, 'alfred', 10.30, 2),
- (2, 'bob', 3.14, 3),
- (2, 'bonnie', 172342.2, 3),
- (3, 'calvin', 978.76, 3),
- (3, 'charlie', 9.8, 1);
-
-create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d
int) stored as orc TBLPROPERTIES ('transactional'='true');
-
-insert into cmv_basetable_2_n3 values
- (1, 'alfred', 10.30, 2),
- (3, 'calvin', 978.76, 3);
-
-CREATE MATERIALIZED VIEW cmv_mat_view_n6
- TBLPROPERTIES ('transactional'='true') AS
- SELECT cmv_basetable_n6.a
- FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a =
cmv_basetable_2_n3.a)
- WHERE cmv_basetable_2_n3.c > 10.0;
-
-DELETE from cmv_basetable_2_n3 WHERE a=1;
-
-ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD;
-
-SELECT * FROM cmv_mat_view_n6;
diff --git
a/ql/src/test/queries/clientpositive/materialized_view_repeated_rebuild.q
b/ql/src/test/queries/clientpositive/materialized_view_repeated_rebuild.q
deleted file mode 100644
index 8fec80b3d2d..00000000000
--- a/ql/src/test/queries/clientpositive/materialized_view_repeated_rebuild.q
+++ /dev/null
@@ -1,37 +0,0 @@
--- Test Incremental rebuild of materialized view without aggregate when source
tables have
--- 1) delete operations since last rebuild.
--- 2) delete records with the same join key from the other joined table
-
-set hive.support.concurrency=true;
-set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-
-create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int)
stored as orc TBLPROPERTIES ('transactional'='true');
-
-insert into cmv_basetable_n6 values
- (1, 'alfred', 10.30, 2),
- (2, 'bob', 3.14, 3),
- (2, 'bonnie', 172342.2, 3),
- (3, 'calvin', 978.76, 3),
- (3, 'charlie', 9.8, 1);
-
-create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d
int) stored as orc TBLPROPERTIES ('transactional'='true');
-
-insert into cmv_basetable_2_n3 values
- (1, 'alfred', 10.30, 2),
- (3, 'calvin', 978.76, 3);
-
-CREATE MATERIALIZED VIEW cmv_mat_view_n6
- TBLPROPERTIES ('transactional'='true') AS
- SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c
- FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a =
cmv_basetable_2_n3.a)
- WHERE cmv_basetable_2_n3.c > 10.0;
-
-DELETE from cmv_basetable_2_n3 WHERE a=1;
-
-ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD;
-
-DELETE FROM cmv_basetable_n6 WHERE a=1;
-
-ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD;
-
-SELECT * FROM cmv_mat_view_n6;
diff --git
a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_12.q.out
b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_12.q.out
new file mode 100644
index 00000000000..81a33edecfa
--- /dev/null
+++
b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_12.q.out
@@ -0,0 +1,211 @@
+PREHOOK: query: create table t1 (a int, b int) stored as orc TBLPROPERTIES
('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: create table t1 (a int, b int) stored as orc TBLPROPERTIES
('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: insert into t1 values
+(3, 3),
+(2, 1),
+(2, 2),
+(1, 2),
+(1, 1)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@t1
+POSTHOOK: query: insert into t1 values
+(3, 3),
+(2, 1),
+(2, 2),
+(1, 2),
+(1, 1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.a SCRIPT []
+POSTHOOK: Lineage: t1.b SCRIPT []
+PREHOOK: query: CREATE MATERIALIZED VIEW mat1
+ TBLPROPERTIES ('transactional'='true') AS
+SELECT a
+FROM t1
+WHERE b < 10
+PREHOOK: type: CREATE_MATERIALIZED_VIEW
+PREHOOK: Input: default@t1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mat1
+POSTHOOK: query: CREATE MATERIALIZED VIEW mat1
+ TBLPROPERTIES ('transactional'='true') AS
+SELECT a
+FROM t1
+WHERE b < 10
+POSTHOOK: type: CREATE_MATERIALIZED_VIEW
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mat1
+POSTHOOK: Lineage: mat1.a SIMPLE [(t1)t1.FieldSchema(name:a, type:int,
comment:null), ]
+PREHOOK: query: delete from t1 where b = 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@t1
+POSTHOOK: query: delete from t1 where b = 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@t1
+PREHOOK: query: explain
+alter materialized view mat1 rebuild
+PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@mat1
+POSTHOOK: query: explain
+alter materialized view mat1 rebuild
+POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@mat1
+STAGE DEPENDENCIES:
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4 depends on stages: Stage-3
+
+STAGE PLANS:
+ Stage: Stage-1
+ Tez
+#### A masked pattern was here ####
+ Edges:
+ Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+ Vertices:
+ Map 1
+ Map Operator Tree:
+ TableScan
+ alias: t1
+ filterExpr: (b < 10) (type: boolean)
+ Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE
Column stats: COMPLETE
+ Filter Operator
+ predicate: (b < 10) (type: boolean)
+ Statistics: Num rows: 3 Data size: 24 Basic stats:
COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: a (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 3 Data size: 12 Basic stats:
COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 3 Data size: 12 Basic stats:
COMPLETE Column stats: COMPLETE
+ table:
+ input format:
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format:
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.mat1
+ Write Type: INSERT
+ Select Operator
+ expressions: _col0 (type: int)
+ outputColumnNames: a
+ Statistics: Num rows: 3 Data size: 12 Basic stats:
COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: min(a), max(a), count(1), count(a),
compute_bit_vector_hll(a)
+ minReductionHashAggr: 0.6666666
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1 Data size: 168 Basic stats:
COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ null sort order:
+ sort order:
+ Statistics: Num rows: 1 Data size: 168 Basic
stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: int), _col1 (type:
int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary)
+ Execution mode: vectorized, llap
+ LLAP IO: may be used (ACID table)
+ Reducer 2
+ Execution mode: vectorized, llap
+ Reduce Operator Tree:
+ Group By Operator
+ aggregations: min(VALUE._col0), max(VALUE._col1),
count(VALUE._col2), count(VALUE._col3), compute_bit_vector_hll(VALUE._col4)
+ mode: mergepartial
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE
Column stats: COMPLETE
+ Select Operator
+ expressions: 'LONG' (type: string), UDFToLong(_col0) (type:
bigint), UDFToLong(_col1) (type: bigint), (_col2 - _col3) (type: bigint),
COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary)
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+ Statistics: Num rows: 1 Data size: 264 Basic stats: COMPLETE
Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 1 Data size: 264 Basic stats:
COMPLETE Column stats: COMPLETE
+ table:
+ input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde:
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+ Stage: Stage-2
+ Dependency Collection
+
+ Stage: Stage-0
+ Move Operator
+ tables:
+ replace: false
+ table:
+ input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.mat1
+ Write Type: INSERT
+
+ Stage: Stage-3
+ Stats Work
+ Basic Stats Work:
+ Column Stats Desc:
+ Columns: a
+ Column Types: int
+ Table: default.mat1
+
+ Stage: Stage-4
+ Materialized View Update
+ name: default.mat1
+ update creation metadata: true
+
+PREHOOK: query: alter materialized view mat1 rebuild
+PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
+PREHOOK: Input: default@t1
+PREHOOK: Output: default@mat1
+POSTHOOK: query: alter materialized view mat1 rebuild
+POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: default@mat1
+POSTHOOK: Lineage: mat1.a SIMPLE [(t1)t1.FieldSchema(name:a, type:int,
comment:null), ]
+PREHOOK: query: explain cbo
+SELECT a
+FROM t1
+WHERE b < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mat1
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: explain cbo
+SELECT a
+FROM t1
+WHERE b < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mat1
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+CBO PLAN:
+HiveTableScan(table=[[default, mat1]], table:alias=[default.mat1])
+
+PREHOOK: query: SELECT a
+FROM t1
+WHERE b < 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@mat1
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT a
+FROM t1
+WHERE b < 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@mat1
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+3
+2
+1
diff --git
a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
index 6c6bf7c1b6e..1b06d867b39 100644
---
a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
+++
b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
@@ -7,6 +7,8 @@ POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_basetable_n6
PREHOOK: query: insert into cmv_basetable_n6 values
+ (1, 'alfred', 10.30, 2),
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(2, 'bob', 3.14, 3),
(2, 'bonnie', 172342.2, 3),
@@ -16,6 +18,8 @@ PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@cmv_basetable_n6
POSTHOOK: query: insert into cmv_basetable_n6 values
+ (1, 'alfred', 10.30, 2),
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(2, 'bob', 3.14, 3),
(2, 'bonnie', 172342.2, 3),
@@ -37,12 +41,14 @@ POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_basetable_2_n3
PREHOOK: query: insert into cmv_basetable_2_n3 values
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(3, 'calvin', 978.76, 3)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@cmv_basetable_2_n3
POSTHOOK: query: insert into cmv_basetable_2_n3 values
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(3, 'calvin', 978.76, 3)
POSTHOOK: type: QUERY
@@ -121,20 +127,20 @@ STAGE PLANS:
TableScan
alias: cmv_basetable_n6
filterExpr: a is not null (type: boolean)
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
predicate: a is not null (type: boolean)
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Select Operator
expressions: a (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Execution mode: llap
LLAP IO: may be used (ACID table)
Map 3
@@ -142,20 +148,20 @@ STAGE PLANS:
TableScan
alias: cmv_basetable_2_n3
filterExpr: ((c > 10.1) and a is not null) (type: boolean)
- Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
predicate: ((c > 10.1) and a is not null) (type: boolean)
- Statistics: Num rows: 3 Data size: 348 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 4 Data size: 464 Basic stats:
COMPLETE Column stats: COMPLETE
Select Operator
expressions: a (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 3 Data size: 12 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 4 Data size: 16 Basic stats:
COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 3 Data size: 12 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 4 Data size: 16 Basic stats:
COMPLETE Column stats: COMPLETE
Execution mode: llap
LLAP IO: may be used (ACID table)
Reducer 2
@@ -168,10 +174,10 @@ STAGE PLANS:
0 _col0 (type: int)
1 _col0 (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE
Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE
Column stats: COMPLETE
table:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -198,6 +204,11 @@ POSTHOOK: Input: default@cmv_basetable_2_n3
POSTHOOK: Input: default@cmv_basetable_n6
#### A masked pattern was here ####
1
+1
+1
+1
+1
+1
3
3
3
@@ -235,20 +246,20 @@ STAGE PLANS:
TableScan
alias: cmv_basetable_n6
filterExpr: a is not null (type: boolean)
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
predicate: a is not null (type: boolean)
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Select Operator
expressions: a (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Execution mode: llap
LLAP IO: may be used (ACID table)
Map 4
@@ -256,7 +267,7 @@ STAGE PLANS:
TableScan
alias: cmv_basetable_2_n3
filterExpr: ((ROW__ID.writeid > 1L) and (c > 10) and a is
not null) (type: boolean)
- Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
predicate: ((ROW__ID.writeid > 1L) and (c > 10) and a is
not null) (type: boolean)
Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
@@ -391,9 +402,9 @@ Table Parameters:
COLUMN_STATS_ACCURATE
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"c\":\"true\"}}
bucketing_version 2
numFiles 2
- numRows 5
+ numRows 10
rawDataSize 0
- totalSize 1516
+ totalSize 1522
transactional true
transactional_properties default
#### A masked pattern was here ####
@@ -474,6 +485,11 @@ POSTHOOK: Input: default@cmv_basetable_n6
POSTHOOK: Input: default@cmv_mat_view_n6
#### A masked pattern was here ####
1
+1
+1
+1
+1
+1
3
3
3
@@ -497,101 +513,70 @@ ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
PREHOOK: Input: default@cmv_basetable_2_n3
PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Input: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
PREHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
POSTHOOK: Input: default@cmv_basetable_2_n3
POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Input: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: Output: default@cmv_mat_view_n6
STAGE DEPENDENCIES:
- Stage-2 is a root stage
- Stage-3 depends on stages: Stage-2
- Stage-0 depends on stages: Stage-3
- Stage-4 depends on stages: Stage-0
- Stage-6 depends on stages: Stage-4, Stage-5
- Stage-1 depends on stages: Stage-3
- Stage-5 depends on stages: Stage-1
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4 depends on stages: Stage-3
STAGE PLANS:
- Stage: Stage-2
+ Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
- Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
- Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
- Reducer 4 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
- Reducer 6 <- Map 5 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
- Map Operator Tree:
- TableScan
- alias: default.cmv_mat_view_n6
- Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE
Column stats: COMPLETE
- Select Operator
- expressions: a (type: int), c (type: decimal(10,2)), true
(type: boolean), ROW__ID (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 5 Data size: 980 Basic stats:
COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- key expressions: _col0 (type: int), _col1 (type:
decimal(10,2))
- null sort order: zz
- sort order: ++
- Map-reduce partition columns: _col0 (type: int), _col1
(type: decimal(10,2))
- Statistics: Num rows: 5 Data size: 980 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col2 (type: boolean), _col3 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- Execution mode: llap
- LLAP IO: may be used (ACID table)
- Map 5
Map Operator Tree:
TableScan
alias: cmv_basetable_n6
filterExpr: a is not null (type: boolean)
- properties:
- acid.fetch.deleted.rows TRUE
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
predicate: a is not null (type: boolean)
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Select Operator
- expressions: a (type: int), ROW__IS__DELETED (type:
boolean)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 5 Data size: 40 Basic stats:
COMPLETE Column stats: COMPLETE
+ expressions: a (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 5 Data size: 40 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col1 (type: boolean)
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Execution mode: llap
LLAP IO: may be used (ACID table)
- Map 7
+ Map 4
Map Operator Tree:
TableScan
alias: cmv_basetable_2_n3
- filterExpr: ((ROW__ID.writeid > 2L) and (c > 10) and a is
not null) (type: boolean)
- properties:
- acid.fetch.deleted.rows TRUE
- Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE
Column stats: COMPLETE
+ filterExpr: ((c > 10) and a is not null) (type: boolean)
+ Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
- predicate: ((ROW__ID.writeid > 2L) and (c > 10) and a is
not null) (type: boolean)
- Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
+ predicate: ((c > 10) and a is not null) (type: boolean)
+ Statistics: Num rows: 4 Data size: 464 Basic stats:
COMPLETE Column stats: COMPLETE
Select Operator
- expressions: a (type: int), c (type: decimal(10,2)),
ROW__IS__DELETED (type: boolean)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1 Data size: 120 Basic stats:
COMPLETE Column stats: COMPLETE
+ expressions: a (type: int), c (type: decimal(10,2))
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 4 Data size: 464 Basic stats:
COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 1 Data size: 120 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col1 (type: decimal(10,2)), _col2
(type: boolean)
+ Statistics: Num rows: 4 Data size: 464 Basic stats:
COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: decimal(10,2))
Execution mode: llap
LLAP IO: may be used (ACID table)
Reducer 2
@@ -599,74 +584,41 @@ STAGE PLANS:
Reduce Operator Tree:
Merge Join Operator
condition map:
- Right Outer Join 0 to 1
+ Inner Join 0 to 1
keys:
- 0 _col0 (type: int), _col1 (type: decimal(10,2))
- 1 _col0 (type: int), _col1 (type: decimal(10,2))
- nullSafes: [true, true]
- outputColumnNames: _col2, _col3, _col4, _col5, _col6
- Statistics: Num rows: 5 Data size: 920 Basic stats: COMPLETE
Column stats: COMPLETE
- Filter Operator
- predicate: (_col2 and _col6) (type: boolean)
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE
Column stats: COMPLETE
- Select Operator
- expressions: _col3 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 76 Basic stats:
COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- key expressions: _col0 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- null sort order: z
- sort order: +
- Map-reduce partition columns: UDFToInteger(_col0) (type:
int)
- Statistics: Num rows: 1 Data size: 76 Basic stats:
COMPLETE Column stats: COMPLETE
- Filter Operator
- predicate: (not _col6) (type: boolean)
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE
Column stats: COMPLETE
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0, _col2
+ Statistics: Num rows: 9 Data size: 1044 Basic stats: COMPLETE
Column stats: COMPLETE
+ Select Operator
+ expressions: _col0 (type: int), _col2 (type: decimal(10,2))
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 9 Data size: 1044 Basic stats:
COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 9 Data size: 1044 Basic stats:
COMPLETE Column stats: COMPLETE
+ table:
+ input format:
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format:
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.cmv_mat_view_n6
+ Write Type: INSERT
Select Operator
- expressions: _col4 (type: int), _col5 (type: decimal(10,2))
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
- table:
- input format:
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
- output format:
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
- serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.cmv_mat_view_n6
- Write Type: INSERT
- Select Operator
- expressions: _col0 (type: int), _col1 (type:
decimal(10,2))
- outputColumnNames: a, c
- Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
- Group By Operator
- aggregations: min(a), max(a), count(1), count(a),
compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c)
- minReductionHashAggr: 0.4
- mode: hash
- outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8
+ expressions: _col0 (type: int), _col1 (type: decimal(10,2))
+ outputColumnNames: a, c
+ Statistics: Num rows: 9 Data size: 1044 Basic stats:
COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: min(a), max(a), count(1), count(a),
compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c)
+ minReductionHashAggr: 0.8888889
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8
+ Statistics: Num rows: 1 Data size: 544 Basic stats:
COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ null sort order:
+ sort order:
Statistics: Num rows: 1 Data size: 544 Basic stats:
COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- null sort order:
- sort order:
- Statistics: Num rows: 1 Data size: 544 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: int), _col1 (type:
int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5
(type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8
(type: binary)
+ value expressions: _col0 (type: int), _col1 (type:
int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5
(type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8
(type: binary)
Reducer 3
- Execution mode: llap
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE
Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE
Column stats: COMPLETE
- table:
- input format:
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
- output format:
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
- serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.cmv_mat_view_n6
- Write Type: DELETE
- Reducer 4
Execution mode: llap
Reduce Operator Tree:
Group By Operator
@@ -685,53 +637,11 @@ STAGE PLANS:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde:
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Reducer 6
- Execution mode: llap
- Reduce Operator Tree:
- Merge Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: int)
- 1 _col0 (type: int)
- outputColumnNames: _col0, _col1, _col3, _col4
- Statistics: Num rows: 2 Data size: 248 Basic stats: COMPLETE
Column stats: COMPLETE
- Select Operator
- expressions: _col0 (type: int), _col3 (type: decimal(10,2)),
(_col1 or _col4) (type: boolean)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE
Column stats: COMPLETE
- Reduce Output Operator
- key expressions: _col0 (type: int), _col1 (type:
decimal(10,2))
- null sort order: zz
- sort order: ++
- Map-reduce partition columns: _col0 (type: int), _col1
(type: decimal(10,2))
- Statistics: Num rows: 2 Data size: 240 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col2 (type: boolean)
- Stage: Stage-3
+ Stage: Stage-2
Dependency Collection
Stage: Stage-0
- Move Operator
- tables:
- replace: false
- table:
- input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
- output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
- serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.cmv_mat_view_n6
- Write Type: DELETE
-
- Stage: Stage-4
- Stats Work
- Basic Stats Work:
-
- Stage: Stage-6
- Materialized View Update
- name: default.cmv_mat_view_n6
- update creation metadata: true
-
- Stage: Stage-1
Move Operator
tables:
replace: false
@@ -742,7 +652,7 @@ STAGE PLANS:
name: default.cmv_mat_view_n6
Write Type: INSERT
- Stage: Stage-5
+ Stage: Stage-3
Stats Work
Basic Stats Work:
Column Stats Desc:
@@ -750,19 +660,20 @@ STAGE PLANS:
Column Types: int, decimal(10,2)
Table: default.cmv_mat_view_n6
+ Stage: Stage-4
+ Materialized View Update
+ name: default.cmv_mat_view_n6
+ update creation metadata: true
+
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
PREHOOK: Input: default@cmv_basetable_2_n3
PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Input: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
PREHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
POSTHOOK: Input: default@cmv_basetable_2_n3
POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Input: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE
[(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int,
comment:null), ]
POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE
[(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2),
comment:null), ]
@@ -818,12 +729,14 @@ POSTHOOK: Input: default@cmv_basetable_2_n3
POSTHOOK: Input: default@cmv_basetable_n6
POSTHOOK: Input: default@cmv_mat_view_n6
#### A masked pattern was here ####
+2
+2
+2
+2
3
3
3
3
-2
-2
PREHOOK: query: DELETE FROM cmv_basetable_2_n3 WHERE a=2
PREHOOK: type: QUERY
PREHOOK: Input: default@cmv_basetable_2_n3
@@ -837,101 +750,70 @@ ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
PREHOOK: Input: default@cmv_basetable_2_n3
PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Input: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
PREHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
POSTHOOK: Input: default@cmv_basetable_2_n3
POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Input: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: Output: default@cmv_mat_view_n6
STAGE DEPENDENCIES:
- Stage-2 is a root stage
- Stage-3 depends on stages: Stage-2
- Stage-0 depends on stages: Stage-3
- Stage-4 depends on stages: Stage-0
- Stage-6 depends on stages: Stage-4, Stage-5
- Stage-1 depends on stages: Stage-3
- Stage-5 depends on stages: Stage-1
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4 depends on stages: Stage-3
STAGE PLANS:
- Stage: Stage-2
+ Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
- Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
- Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
- Reducer 4 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
- Reducer 6 <- Map 5 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
- Map Operator Tree:
- TableScan
- alias: default.cmv_mat_view_n6
- Statistics: Num rows: 6 Data size: 696 Basic stats: COMPLETE
Column stats: COMPLETE
- Select Operator
- expressions: a (type: int), c (type: decimal(10,2)), true
(type: boolean), ROW__ID (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 6 Data size: 1176 Basic stats:
COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- key expressions: _col0 (type: int), _col1 (type:
decimal(10,2))
- null sort order: zz
- sort order: ++
- Map-reduce partition columns: _col0 (type: int), _col1
(type: decimal(10,2))
- Statistics: Num rows: 6 Data size: 1176 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col2 (type: boolean), _col3 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- Execution mode: llap
- LLAP IO: may be used (ACID table)
- Map 5
Map Operator Tree:
TableScan
alias: cmv_basetable_n6
filterExpr: a is not null (type: boolean)
- properties:
- acid.fetch.deleted.rows TRUE
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
predicate: a is not null (type: boolean)
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Select Operator
- expressions: a (type: int), ROW__IS__DELETED (type:
boolean)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 5 Data size: 40 Basic stats:
COMPLETE Column stats: COMPLETE
+ expressions: a (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 5 Data size: 40 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col1 (type: boolean)
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Execution mode: llap
LLAP IO: may be used (ACID table)
- Map 7
+ Map 4
Map Operator Tree:
TableScan
alias: cmv_basetable_2_n3
- filterExpr: ((ROW__ID.writeid > 3L) and (c > 10) and a is
not null) (type: boolean)
- properties:
- acid.fetch.deleted.rows TRUE
+ filterExpr: ((c > 10) and a is not null) (type: boolean)
Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
- predicate: ((ROW__ID.writeid > 3L) and (c > 10) and a is
not null) (type: boolean)
- Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
+ predicate: ((c > 10) and a is not null) (type: boolean)
+ Statistics: Num rows: 2 Data size: 232 Basic stats:
COMPLETE Column stats: COMPLETE
Select Operator
- expressions: a (type: int), c (type: decimal(10,2)),
ROW__IS__DELETED (type: boolean)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1 Data size: 120 Basic stats:
COMPLETE Column stats: COMPLETE
+ expressions: a (type: int), c (type: decimal(10,2))
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 2 Data size: 232 Basic stats:
COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 1 Data size: 120 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col1 (type: decimal(10,2)), _col2
(type: boolean)
+ Statistics: Num rows: 2 Data size: 232 Basic stats:
COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: decimal(10,2))
Execution mode: llap
LLAP IO: may be used (ACID table)
Reducer 2
@@ -939,74 +821,41 @@ STAGE PLANS:
Reduce Operator Tree:
Merge Join Operator
condition map:
- Right Outer Join 0 to 1
+ Inner Join 0 to 1
keys:
- 0 _col0 (type: int), _col1 (type: decimal(10,2))
- 1 _col0 (type: int), _col1 (type: decimal(10,2))
- nullSafes: [true, true]
- outputColumnNames: _col2, _col3, _col4, _col5, _col6
- Statistics: Num rows: 9 Data size: 1640 Basic stats: COMPLETE
Column stats: COMPLETE
- Filter Operator
- predicate: (_col2 and _col6) (type: boolean)
- Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE
Column stats: COMPLETE
- Select Operator
- expressions: _col3 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- outputColumnNames: _col0
- Statistics: Num rows: 2 Data size: 152 Basic stats:
COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- key expressions: _col0 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- null sort order: z
- sort order: +
- Map-reduce partition columns: UDFToInteger(_col0) (type:
int)
- Statistics: Num rows: 2 Data size: 152 Basic stats:
COMPLETE Column stats: COMPLETE
- Filter Operator
- predicate: (not _col6) (type: boolean)
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE
Column stats: COMPLETE
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0, _col2
+ Statistics: Num rows: 7 Data size: 812 Basic stats: COMPLETE
Column stats: COMPLETE
+ Select Operator
+ expressions: _col0 (type: int), _col2 (type: decimal(10,2))
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 7 Data size: 812 Basic stats: COMPLETE
Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 7 Data size: 812 Basic stats:
COMPLETE Column stats: COMPLETE
+ table:
+ input format:
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format:
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.cmv_mat_view_n6
+ Write Type: INSERT
Select Operator
- expressions: _col4 (type: int), _col5 (type: decimal(10,2))
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
- table:
- input format:
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
- output format:
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
- serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.cmv_mat_view_n6
- Write Type: INSERT
- Select Operator
- expressions: _col0 (type: int), _col1 (type:
decimal(10,2))
- outputColumnNames: a, c
- Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
- Group By Operator
- aggregations: min(a), max(a), count(1), count(a),
compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c)
- minReductionHashAggr: 0.4
- mode: hash
- outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8
+ expressions: _col0 (type: int), _col1 (type: decimal(10,2))
+ outputColumnNames: a, c
+ Statistics: Num rows: 7 Data size: 812 Basic stats:
COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: min(a), max(a), count(1), count(a),
compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c)
+ minReductionHashAggr: 0.85714287
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8
+ Statistics: Num rows: 1 Data size: 544 Basic stats:
COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ null sort order:
+ sort order:
Statistics: Num rows: 1 Data size: 544 Basic stats:
COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- null sort order:
- sort order:
- Statistics: Num rows: 1 Data size: 544 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: int), _col1 (type:
int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5
(type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8
(type: binary)
+ value expressions: _col0 (type: int), _col1 (type:
int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5
(type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8
(type: binary)
Reducer 3
- Execution mode: llap
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- outputColumnNames: _col0
- Statistics: Num rows: 2 Data size: 152 Basic stats: COMPLETE
Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 2 Data size: 152 Basic stats: COMPLETE
Column stats: COMPLETE
- table:
- input format:
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
- output format:
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
- serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.cmv_mat_view_n6
- Write Type: DELETE
- Reducer 4
Execution mode: llap
Reduce Operator Tree:
Group By Operator
@@ -1025,53 +874,11 @@ STAGE PLANS:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde:
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Reducer 6
- Execution mode: llap
- Reduce Operator Tree:
- Merge Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: int)
- 1 _col0 (type: int)
- outputColumnNames: _col0, _col1, _col3, _col4
- Statistics: Num rows: 3 Data size: 372 Basic stats: COMPLETE
Column stats: COMPLETE
- Select Operator
- expressions: _col0 (type: int), _col3 (type: decimal(10,2)),
(_col1 or _col4) (type: boolean)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE
Column stats: COMPLETE
- Reduce Output Operator
- key expressions: _col0 (type: int), _col1 (type:
decimal(10,2))
- null sort order: zz
- sort order: ++
- Map-reduce partition columns: _col0 (type: int), _col1
(type: decimal(10,2))
- Statistics: Num rows: 3 Data size: 360 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col2 (type: boolean)
- Stage: Stage-3
+ Stage: Stage-2
Dependency Collection
Stage: Stage-0
- Move Operator
- tables:
- replace: false
- table:
- input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
- output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
- serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.cmv_mat_view_n6
- Write Type: DELETE
-
- Stage: Stage-4
- Stats Work
- Basic Stats Work:
-
- Stage: Stage-6
- Materialized View Update
- name: default.cmv_mat_view_n6
- update creation metadata: true
-
- Stage: Stage-1
Move Operator
tables:
replace: false
@@ -1082,7 +889,7 @@ STAGE PLANS:
name: default.cmv_mat_view_n6
Write Type: INSERT
- Stage: Stage-5
+ Stage: Stage-3
Stats Work
Basic Stats Work:
Column Stats Desc:
@@ -1090,19 +897,20 @@ STAGE PLANS:
Column Types: int, decimal(10,2)
Table: default.cmv_mat_view_n6
+ Stage: Stage-4
+ Materialized View Update
+ name: default.cmv_mat_view_n6
+ update creation metadata: true
+
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
PREHOOK: Input: default@cmv_basetable_2_n3
PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Input: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
PREHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
POSTHOOK: Input: default@cmv_basetable_2_n3
POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Input: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE
[(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int,
comment:null), ]
POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE
[(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2),
comment:null), ]
@@ -1209,20 +1017,20 @@ STAGE PLANS:
TableScan
alias: cmv_basetable_n6
filterExpr: a is not null (type: boolean)
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
predicate: a is not null (type: boolean)
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Select Operator
expressions: a (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Execution mode: llap
LLAP IO: may be used (ACID table)
Map 4
@@ -1257,14 +1065,14 @@ STAGE PLANS:
0 _col0 (type: int)
1 _col0 (type: int)
outputColumnNames: _col0, _col2
- Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE
Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col2 (type: decimal(10,2))
outputColumnNames: _col0, _col1
- Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE
Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 2 Data size: 232 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 3 Data size: 348 Basic stats:
COMPLETE Column stats: COMPLETE
table:
input format:
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format:
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
@@ -1274,10 +1082,10 @@ STAGE PLANS:
Select Operator
expressions: _col0 (type: int), _col1 (type: decimal(10,2))
outputColumnNames: a, c
- Statistics: Num rows: 2 Data size: 232 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 3 Data size: 348 Basic stats:
COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: min(a), max(a), count(1), count(a),
compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c)
- minReductionHashAggr: 0.5
+ minReductionHashAggr: 0.6666666
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8
Statistics: Num rows: 1 Data size: 544 Basic stats:
COMPLETE Column stats: COMPLETE
@@ -1402,6 +1210,8 @@ POSTHOOK: Input: default@cmv_mat_view_n6
3
3
1
+1
+1
PREHOOK: query: drop materialized view cmv_mat_view_n6
PREHOOK: type: DROP_MATERIALIZED_VIEW
PREHOOK: Input: default@cmv_mat_view_n6
diff --git
a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_8.q.out
b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_8.q.out
index 7aee7df9de4..3f7007a1501 100644
---
a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_8.q.out
+++
b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_8.q.out
@@ -7,6 +7,8 @@ POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_basetable_n6
PREHOOK: query: insert into cmv_basetable_n6 values
+ (1, 'alfred', 10.30, 2),
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(2, 'bob', 3.14, 3),
(2, 'bonnie', 172342.2, 3),
@@ -16,6 +18,8 @@ PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@cmv_basetable_n6
POSTHOOK: query: insert into cmv_basetable_n6 values
+ (1, 'alfred', 10.30, 2),
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(2, 'bob', 3.14, 3),
(2, 'bonnie', 172342.2, 3),
@@ -37,12 +41,14 @@ POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@cmv_basetable_2_n3
PREHOOK: query: insert into cmv_basetable_2_n3 values
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(3, 'calvin', 978.76, 3)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@cmv_basetable_2_n3
POSTHOOK: query: insert into cmv_basetable_2_n3 values
+ (1, 'alfred', 10.30, 2),
(1, 'alfred', 10.30, 2),
(3, 'calvin', 978.76, 3)
POSTHOOK: type: QUERY
@@ -129,6 +135,11 @@ POSTHOOK: Input: default@cmv_basetable_2_n3
POSTHOOK: Input: default@cmv_basetable_n6
#### A masked pattern was here ####
1
+1
+1
+1
+1
+1
3
3
3
@@ -166,20 +177,20 @@ STAGE PLANS:
TableScan
alias: cmv_basetable_n6
filterExpr: a is not null (type: boolean)
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
predicate: a is not null (type: boolean)
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Select Operator
expressions: a (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 5 Data size: 20 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: may be used (ACID table)
Map 4
@@ -189,7 +200,7 @@ STAGE PLANS:
filterExpr: ((ROW__ID.writeid > 1L) and (c > 10) and a is
not null) (type: boolean)
properties:
insertonly.fetch.bucketid TRUE
- Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
predicate: ((ROW__ID.writeid > 1L) and (c > 10) and a is
not null) (type: boolean)
Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
@@ -344,6 +355,11 @@ POSTHOOK: Input: default@cmv_basetable_n6
POSTHOOK: Input: default@cmv_mat_view_n6
#### A masked pattern was here ####
1
+1
+1
+1
+1
+1
3
3
3
@@ -367,101 +383,70 @@ ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
PREHOOK: Input: default@cmv_basetable_2_n3
PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Input: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
PREHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: query: EXPLAIN
ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
POSTHOOK: Input: default@cmv_basetable_2_n3
POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Input: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: Output: default@cmv_mat_view_n6
STAGE DEPENDENCIES:
- Stage-2 is a root stage
- Stage-3 depends on stages: Stage-2
- Stage-0 depends on stages: Stage-3
- Stage-4 depends on stages: Stage-0
- Stage-6 depends on stages: Stage-4, Stage-5
- Stage-1 depends on stages: Stage-3
- Stage-5 depends on stages: Stage-1
+ Stage-1 is a root stage
+ Stage-2 depends on stages: Stage-1
+ Stage-0 depends on stages: Stage-2
+ Stage-3 depends on stages: Stage-0
+ Stage-4 depends on stages: Stage-3
STAGE PLANS:
- Stage: Stage-2
+ Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
- Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
- Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
- Reducer 4 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
- Reducer 6 <- Map 5 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+ Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+ Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
- Map Operator Tree:
- TableScan
- alias: default.cmv_mat_view_n6
- Statistics: Num rows: 5 Data size: 580 Basic stats: COMPLETE
Column stats: COMPLETE
- Select Operator
- expressions: a (type: int), c (type: decimal(10,2)), true
(type: boolean), ROW__ID (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 5 Data size: 980 Basic stats:
COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- key expressions: _col0 (type: int), _col1 (type:
decimal(10,2))
- null sort order: zz
- sort order: ++
- Map-reduce partition columns: _col0 (type: int), _col1
(type: decimal(10,2))
- Statistics: Num rows: 5 Data size: 980 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col2 (type: boolean), _col3 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- Execution mode: vectorized, llap
- LLAP IO: may be used (ACID table)
- Map 5
Map Operator Tree:
TableScan
alias: cmv_basetable_n6
- filterExpr: ((ROW__ID.writeid > 1L) and a is not null)
(type: boolean)
- properties:
- acid.fetch.deleted.rows TRUE
- Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE
Column stats: COMPLETE
+ filterExpr: a is not null (type: boolean)
+ Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
- predicate: ((ROW__ID.writeid > 1L) and a is not null)
(type: boolean)
- Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE
Column stats: COMPLETE
+ predicate: a is not null (type: boolean)
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Select Operator
- expressions: a (type: int), ROW__IS__DELETED (type:
boolean)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 8 Basic stats:
COMPLETE Column stats: COMPLETE
+ expressions: a (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 1 Data size: 8 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col1 (type: boolean)
+ Statistics: Num rows: 7 Data size: 28 Basic stats:
COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: may be used (ACID table)
- Map 7
+ Map 4
Map Operator Tree:
TableScan
alias: cmv_basetable_2_n3
filterExpr: ((c > 10) and a is not null) (type: boolean)
- properties:
- acid.fetch.deleted.rows TRUE
- Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE
Column stats: COMPLETE
+ Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE
Column stats: COMPLETE
Filter Operator
predicate: ((c > 10) and a is not null) (type: boolean)
- Statistics: Num rows: 3 Data size: 348 Basic stats:
COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 4 Data size: 464 Basic stats:
COMPLETE Column stats: COMPLETE
Select Operator
- expressions: a (type: int), c (type: decimal(10,2)),
ROW__IS__DELETED (type: boolean)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 3 Data size: 360 Basic stats:
COMPLETE Column stats: COMPLETE
+ expressions: a (type: int), c (type: decimal(10,2))
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 4 Data size: 464 Basic stats:
COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 3 Data size: 360 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col1 (type: decimal(10,2)), _col2
(type: boolean)
+ Statistics: Num rows: 4 Data size: 464 Basic stats:
COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: decimal(10,2))
Execution mode: vectorized, llap
LLAP IO: may be used (ACID table)
Reducer 2
@@ -469,74 +454,41 @@ STAGE PLANS:
Reduce Operator Tree:
Merge Join Operator
condition map:
- Right Outer Join 0 to 1
+ Inner Join 0 to 1
keys:
- 0 _col0 (type: int), _col1 (type: decimal(10,2))
- 1 _col0 (type: int), _col1 (type: decimal(10,2))
- nullSafes: [true, true]
- outputColumnNames: _col2, _col3, _col4, _col5, _col6
- Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE
Column stats: COMPLETE
- Filter Operator
- predicate: (_col2 and _col6) (type: boolean)
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE
Column stats: COMPLETE
- Select Operator
- expressions: _col3 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 76 Basic stats:
COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- key expressions: _col0 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- null sort order: z
- sort order: +
- Map-reduce partition columns: UDFToInteger(_col0) (type:
int)
- Statistics: Num rows: 1 Data size: 76 Basic stats:
COMPLETE Column stats: COMPLETE
- Filter Operator
- predicate: (not _col6) (type: boolean)
- Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE
Column stats: COMPLETE
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0, _col2
+ Statistics: Num rows: 9 Data size: 1044 Basic stats: COMPLETE
Column stats: COMPLETE
+ Select Operator
+ expressions: _col0 (type: int), _col2 (type: decimal(10,2))
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 9 Data size: 1044 Basic stats:
COMPLETE Column stats: COMPLETE
+ File Output Operator
+ compressed: false
+ Statistics: Num rows: 9 Data size: 1044 Basic stats:
COMPLETE Column stats: COMPLETE
+ table:
+ input format:
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+ output format:
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+ serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+ name: default.cmv_mat_view_n6
+ Write Type: INSERT
Select Operator
- expressions: _col4 (type: int), _col5 (type: decimal(10,2))
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
- table:
- input format:
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
- output format:
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
- serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.cmv_mat_view_n6
- Write Type: INSERT
- Select Operator
- expressions: _col0 (type: int), _col1 (type:
decimal(10,2))
- outputColumnNames: a, c
- Statistics: Num rows: 1 Data size: 116 Basic stats:
COMPLETE Column stats: COMPLETE
- Group By Operator
- aggregations: min(a), max(a), count(1), count(a),
compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c)
- minReductionHashAggr: 0.4
- mode: hash
- outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8
+ expressions: _col0 (type: int), _col1 (type: decimal(10,2))
+ outputColumnNames: a, c
+ Statistics: Num rows: 9 Data size: 1044 Basic stats:
COMPLETE Column stats: COMPLETE
+ Group By Operator
+ aggregations: min(a), max(a), count(1), count(a),
compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c)
+ minReductionHashAggr: 0.8888889
+ mode: hash
+ outputColumnNames: _col0, _col1, _col2, _col3, _col4,
_col5, _col6, _col7, _col8
+ Statistics: Num rows: 1 Data size: 544 Basic stats:
COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ null sort order:
+ sort order:
Statistics: Num rows: 1 Data size: 544 Basic stats:
COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- null sort order:
- sort order:
- Statistics: Num rows: 1 Data size: 544 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: int), _col1 (type:
int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5
(type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8
(type: binary)
+ value expressions: _col0 (type: int), _col1 (type:
int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5
(type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8
(type: binary)
Reducer 3
- Execution mode: vectorized, llap
- Reduce Operator Tree:
- Select Operator
- expressions: KEY.reducesinkkey0 (type:
struct<writeid:bigint,bucketid:int,rowid:bigint>)
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE
Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE
Column stats: COMPLETE
- table:
- input format:
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
- output format:
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
- serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.cmv_mat_view_n6
- Write Type: DELETE
- Reducer 4
Execution mode: vectorized, llap
Reduce Operator Tree:
Group By Operator
@@ -555,53 +507,11 @@ STAGE PLANS:
input format:
org.apache.hadoop.mapred.SequenceFileInputFormat
output format:
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde:
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Reducer 6
- Execution mode: llap
- Reduce Operator Tree:
- Merge Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: int)
- 1 _col0 (type: int)
- outputColumnNames: _col0, _col1, _col3, _col4
- Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE
Column stats: COMPLETE
- Select Operator
- expressions: _col0 (type: int), _col3 (type: decimal(10,2)),
(_col1 or _col4) (type: boolean)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE
Column stats: COMPLETE
- Reduce Output Operator
- key expressions: _col0 (type: int), _col1 (type:
decimal(10,2))
- null sort order: zz
- sort order: ++
- Map-reduce partition columns: _col0 (type: int), _col1
(type: decimal(10,2))
- Statistics: Num rows: 1 Data size: 120 Basic stats:
COMPLETE Column stats: COMPLETE
- value expressions: _col2 (type: boolean)
- Stage: Stage-3
+ Stage: Stage-2
Dependency Collection
Stage: Stage-0
- Move Operator
- tables:
- replace: false
- table:
- input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
- output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
- serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
- name: default.cmv_mat_view_n6
- Write Type: DELETE
-
- Stage: Stage-4
- Stats Work
- Basic Stats Work:
-
- Stage: Stage-6
- Materialized View Update
- name: default.cmv_mat_view_n6
- update creation metadata: true
-
- Stage: Stage-1
Move Operator
tables:
replace: false
@@ -612,7 +522,7 @@ STAGE PLANS:
name: default.cmv_mat_view_n6
Write Type: INSERT
- Stage: Stage-5
+ Stage: Stage-3
Stats Work
Basic Stats Work:
Column Stats Desc:
@@ -620,19 +530,20 @@ STAGE PLANS:
Column Types: int, decimal(10,2)
Table: default.cmv_mat_view_n6
+ Stage: Stage-4
+ Materialized View Update
+ name: default.cmv_mat_view_n6
+ update creation metadata: true
+
PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
PREHOOK: Input: default@cmv_basetable_2_n3
PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Input: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
PREHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
POSTHOOK: Input: default@cmv_basetable_2_n3
POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Input: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: Output: default@cmv_mat_view_n6
POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE
[(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int,
comment:null), ]
POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE
[(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2),
comment:null), ]
diff --git
a/ql/src/test/results/clientpositive/llap/materialized_view_join_rebuild.q.out
b/ql/src/test/results/clientpositive/llap/materialized_view_join_rebuild.q.out
deleted file mode 100644
index 9f66625aaa2..00000000000
---
a/ql/src/test/results/clientpositive/llap/materialized_view_join_rebuild.q.out
+++ /dev/null
@@ -1,108 +0,0 @@
-PREHOOK: query: create table cmv_basetable_n6 (a int, b varchar(256), c
decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@cmv_basetable_n6
-POSTHOOK: query: create table cmv_basetable_n6 (a int, b varchar(256), c
decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@cmv_basetable_n6
-PREHOOK: query: insert into cmv_basetable_n6 values
- (1, 'alfred', 10.30, 2),
- (2, 'bob', 3.14, 3),
- (2, 'bonnie', 172342.2, 3),
- (3, 'calvin', 978.76, 3),
- (3, 'charlie', 9.8, 1)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@cmv_basetable_n6
-POSTHOOK: query: insert into cmv_basetable_n6 values
- (1, 'alfred', 10.30, 2),
- (2, 'bob', 3.14, 3),
- (2, 'bonnie', 172342.2, 3),
- (3, 'calvin', 978.76, 3),
- (3, 'charlie', 9.8, 1)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@cmv_basetable_n6
-POSTHOOK: Lineage: cmv_basetable_n6.a SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_n6.b SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_n6.c SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_n6.d SCRIPT []
-PREHOOK: query: create table cmv_basetable_2_n3 (a int, b varchar(256), c
decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@cmv_basetable_2_n3
-POSTHOOK: query: create table cmv_basetable_2_n3 (a int, b varchar(256), c
decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@cmv_basetable_2_n3
-PREHOOK: query: insert into cmv_basetable_2_n3 values
- (1, 'alfred', 10.30, 2),
- (3, 'calvin', 978.76, 3)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@cmv_basetable_2_n3
-POSTHOOK: query: insert into cmv_basetable_2_n3 values
- (1, 'alfred', 10.30, 2),
- (3, 'calvin', 978.76, 3)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@cmv_basetable_2_n3
-POSTHOOK: Lineage: cmv_basetable_2_n3.a SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_2_n3.b SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_2_n3.c SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_2_n3.d SCRIPT []
-PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view_n6
- TBLPROPERTIES ('transactional'='true') AS
- SELECT cmv_basetable_n6.a
- FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a =
cmv_basetable_2_n3.a)
- WHERE cmv_basetable_2_n3.c > 10.0
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
-PREHOOK: Input: default@cmv_basetable_2_n3
-PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Output: database:default
-PREHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view_n6
- TBLPROPERTIES ('transactional'='true') AS
- SELECT cmv_basetable_n6.a
- FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a =
cmv_basetable_2_n3.a)
- WHERE cmv_basetable_2_n3.c > 10.0
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
-POSTHOOK: Input: default@cmv_basetable_2_n3
-POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE
[(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int,
comment:null), ]
-PREHOOK: query: DELETE from cmv_basetable_2_n3 WHERE a=1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cmv_basetable_2_n3
-PREHOOK: Output: default@cmv_basetable_2_n3
-POSTHOOK: query: DELETE from cmv_basetable_2_n3 WHERE a=1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cmv_basetable_2_n3
-POSTHOOK: Output: default@cmv_basetable_2_n3
-PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
-PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
-PREHOOK: Input: default@cmv_basetable_2_n3
-PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Input: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
-POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
-POSTHOOK: Input: default@cmv_basetable_2_n3
-POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Input: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE
[(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int,
comment:null), ]
-PREHOOK: query: SELECT * FROM cmv_mat_view_n6
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cmv_mat_view_n6
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM cmv_mat_view_n6
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cmv_mat_view_n6
-#### A masked pattern was here ####
-3
-3
diff --git
a/ql/src/test/results/clientpositive/llap/materialized_view_repeated_rebuild.q.out
b/ql/src/test/results/clientpositive/llap/materialized_view_repeated_rebuild.q.out
deleted file mode 100644
index 9739f87e96e..00000000000
---
a/ql/src/test/results/clientpositive/llap/materialized_view_repeated_rebuild.q.out
+++ /dev/null
@@ -1,134 +0,0 @@
-PREHOOK: query: create table cmv_basetable_n6 (a int, b varchar(256), c
decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@cmv_basetable_n6
-POSTHOOK: query: create table cmv_basetable_n6 (a int, b varchar(256), c
decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@cmv_basetable_n6
-PREHOOK: query: insert into cmv_basetable_n6 values
- (1, 'alfred', 10.30, 2),
- (2, 'bob', 3.14, 3),
- (2, 'bonnie', 172342.2, 3),
- (3, 'calvin', 978.76, 3),
- (3, 'charlie', 9.8, 1)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@cmv_basetable_n6
-POSTHOOK: query: insert into cmv_basetable_n6 values
- (1, 'alfred', 10.30, 2),
- (2, 'bob', 3.14, 3),
- (2, 'bonnie', 172342.2, 3),
- (3, 'calvin', 978.76, 3),
- (3, 'charlie', 9.8, 1)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@cmv_basetable_n6
-POSTHOOK: Lineage: cmv_basetable_n6.a SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_n6.b SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_n6.c SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_n6.d SCRIPT []
-PREHOOK: query: create table cmv_basetable_2_n3 (a int, b varchar(256), c
decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@cmv_basetable_2_n3
-POSTHOOK: query: create table cmv_basetable_2_n3 (a int, b varchar(256), c
decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@cmv_basetable_2_n3
-PREHOOK: query: insert into cmv_basetable_2_n3 values
- (1, 'alfred', 10.30, 2),
- (3, 'calvin', 978.76, 3)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@cmv_basetable_2_n3
-POSTHOOK: query: insert into cmv_basetable_2_n3 values
- (1, 'alfred', 10.30, 2),
- (3, 'calvin', 978.76, 3)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@cmv_basetable_2_n3
-POSTHOOK: Lineage: cmv_basetable_2_n3.a SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_2_n3.b SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_2_n3.c SCRIPT []
-POSTHOOK: Lineage: cmv_basetable_2_n3.d SCRIPT []
-PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view_n6
- TBLPROPERTIES ('transactional'='true') AS
- SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c
- FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a =
cmv_basetable_2_n3.a)
- WHERE cmv_basetable_2_n3.c > 10.0
-PREHOOK: type: CREATE_MATERIALIZED_VIEW
-PREHOOK: Input: default@cmv_basetable_2_n3
-PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Output: database:default
-PREHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view_n6
- TBLPROPERTIES ('transactional'='true') AS
- SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c
- FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a =
cmv_basetable_2_n3.a)
- WHERE cmv_basetable_2_n3.c > 10.0
-POSTHOOK: type: CREATE_MATERIALIZED_VIEW
-POSTHOOK: Input: default@cmv_basetable_2_n3
-POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE
[(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int,
comment:null), ]
-POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE
[(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2),
comment:null), ]
-PREHOOK: query: DELETE from cmv_basetable_2_n3 WHERE a=1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cmv_basetable_2_n3
-PREHOOK: Output: default@cmv_basetable_2_n3
-POSTHOOK: query: DELETE from cmv_basetable_2_n3 WHERE a=1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cmv_basetable_2_n3
-POSTHOOK: Output: default@cmv_basetable_2_n3
-PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
-PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
-PREHOOK: Input: default@cmv_basetable_2_n3
-PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Input: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
-POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
-POSTHOOK: Input: default@cmv_basetable_2_n3
-POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Input: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE
[(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int,
comment:null), ]
-POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE
[(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2),
comment:null), ]
-PREHOOK: query: DELETE FROM cmv_basetable_n6 WHERE a=1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Output: default@cmv_basetable_n6
-POSTHOOK: query: DELETE FROM cmv_basetable_n6 WHERE a=1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Output: default@cmv_basetable_n6
-PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
-PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
-PREHOOK: Input: default@cmv_basetable_2_n3
-PREHOOK: Input: default@cmv_basetable_n6
-PREHOOK: Input: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
-PREHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD
-POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD
-POSTHOOK: Input: default@cmv_basetable_2_n3
-POSTHOOK: Input: default@cmv_basetable_n6
-POSTHOOK: Input: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: Output: default@cmv_mat_view_n6
-POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE
[(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int,
comment:null), ]
-POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE
[(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2),
comment:null), ]
-PREHOOK: query: SELECT * FROM cmv_mat_view_n6
-PREHOOK: type: QUERY
-PREHOOK: Input: default@cmv_mat_view_n6
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM cmv_mat_view_n6
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@cmv_mat_view_n6
-#### A masked pattern was here ####
-3 978.76
-3 978.76