Author: gunther
Date: Fri Jul 25 03:05:03 2014
New Revision: 1613342
URL: http://svn.apache.org/r1613342
Log:
HIVE-7510: Add Greedy Algorithm (LucidDB) For Join Order (Laljo John Pullokkaran
via Gunther Hagleitner)
Modified:
hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
hive/branches/cbo/conf/hive-default.xml.template
hive/branches/cbo/pom.xml
hive/branches/cbo/ql/pom.xml
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveRelFieldTrimmer.java
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
Modified:
hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL:
http://svn.apache.org/viewvc/hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1613342&r1=1613341&r2=1613342&view=diff
==============================================================================
--- hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
(original)
+++ hive/branches/cbo/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
Fri Jul 25 03:05:03 2014
@@ -619,6 +619,7 @@ public class HiveConf extends Configurat
HIVE_CBO_ENABLED("hive.cbo.enable", false, "Flag to control enabling Cost
Based Optimizations using Optiq framework."),
HIVE_CBO_MAX_JOINS_SUPPORTED("hive.cbo.max.joins.supported", 10, " Control
queries that will be considered for join reordering, based on number of joins
in them. Beyond a certain number of joins, the cost of considering possible
permutations is prohibitive."),
HIVE_CBO_PULLPROJECTABOVEJOIN_RULE("hive.cbo.project.pullabovejoin.rule",
false, ""),
+ HIVE_CBO_GREEDY_JOIN_ORDER("hive.cbo.greedy.join.order", false, ""),
// hive.mapjoin.bucket.cache.size has been replaced by
hive.smbjoin.cache.row,
// need to remove by hive .13. Also, do not change default (see SMB
operator)
Modified: hive/branches/cbo/conf/hive-default.xml.template
URL:
http://svn.apache.org/viewvc/hive/branches/cbo/conf/hive-default.xml.template?rev=1613342&r1=1613341&r2=1613342&view=diff
==============================================================================
--- hive/branches/cbo/conf/hive-default.xml.template (original)
+++ hive/branches/cbo/conf/hive-default.xml.template Fri Jul 25 03:05:03 2014
@@ -953,6 +953,11 @@
<description/>
</property>
<property>
+ <key>hive.cbo.greedy.join.order</key>
+ <value>false</value>
+ <description/>
+ </property>
+ <property>
<key>hive.mapjoin.bucket.cache.size</key>
<value>100</value>
<description/>
Modified: hive/branches/cbo/pom.xml
URL:
http://svn.apache.org/viewvc/hive/branches/cbo/pom.xml?rev=1613342&r1=1613341&r2=1613342&view=diff
==============================================================================
--- hive/branches/cbo/pom.xml (original)
+++ hive/branches/cbo/pom.xml Fri Jul 25 03:05:03 2014
@@ -199,17 +199,6 @@
<enabled>false</enabled>
</snapshots>
</repository>
- <repository>
- <id>conjars</id>
- <name>Optiq Conjars repository</name>
- <url>http://conjars.org/repo</url>
- <layout>default</layout>
- <releases>
- <enabled>true</enabled>
- <updatePolicy>always</updatePolicy>
- <checksumPolicy>warn</checksumPolicy>
- </releases>
- </repository>
</repositories>
<!-- Hadoop dependency management is done at the bottom under profiles -->
Modified: hive/branches/cbo/ql/pom.xml
URL:
http://svn.apache.org/viewvc/hive/branches/cbo/ql/pom.xml?rev=1613342&r1=1613341&r2=1613342&view=diff
==============================================================================
--- hive/branches/cbo/ql/pom.xml (original)
+++ hive/branches/cbo/ql/pom.xml Fri Jul 25 03:05:03 2014
@@ -28,7 +28,7 @@
<name>Hive Query Language</name>
<properties>
- <optiq.version>0.7</optiq.version>
+ <optiq.version>0.9.0-incubating-SNAPSHOT</optiq.version>
<hive.path.to.root>..</hive.path.to.root>
</properties>
@@ -183,7 +183,7 @@
<version>${datanucleus-core.version}</version>
</dependency>
<dependency>
- <groupId>net.hydromatic</groupId>
+ <groupId>org.apache.optiq</groupId>
<artifactId>optiq-core</artifactId>
<version>${optiq.version}</version>
<exclusions>
Modified:
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java
URL:
http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java?rev=1613342&r1=1613341&r2=1613342&view=diff
==============================================================================
---
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java
(original)
+++
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java
Fri Jul 25 03:05:03 2014
@@ -17,7 +17,7 @@ public class HiveVolcanoPlanner extends
/** Creates a HiveVolcanoPlanner. */
public HiveVolcanoPlanner() {
- super(HiveCost.FACTORY);
+ super(HiveCost.FACTORY, null);
}
public static RelOptPlanner createPlanner() {
Modified:
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java
URL:
http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java?rev=1613342&r1=1613341&r2=1613342&view=diff
==============================================================================
---
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java
(original)
+++
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java
Fri Jul 25 03:05:03 2014
@@ -81,15 +81,15 @@ public class HiveJoinRel extends JoinRel
@Override
public final HiveJoinRel copy(RelTraitSet traitSet, RexNode conditionExpr,
RelNode left,
- RelNode right, JoinRelType joinType) {
- return copy(traitSet, conditionExpr, left, right, m_joinAlgorithm,
m_mapJoinStreamingSide);
+ RelNode right, JoinRelType joinType, boolean semiJoinDone) {
+ return copy(traitSet, conditionExpr, left, right, m_joinAlgorithm,
m_mapJoinStreamingSide, m_leftSemiJoin);
}
public HiveJoinRel copy(RelTraitSet traitSet, RexNode conditionExpr, RelNode
left, RelNode right,
- JoinAlgorithm joinalgo, MapJoinStreamingRelation streamingSide) {
+ JoinAlgorithm joinalgo, MapJoinStreamingRelation streamingSide, boolean
semiJoinDone) {
try {
return new HiveJoinRel(getCluster(), traitSet, left, right,
conditionExpr, joinType,
- variablesStopped, joinalgo, streamingSide, this.m_leftSemiJoin);
+ variablesStopped, joinalgo, streamingSide, semiJoinDone);
} catch (InvalidRelException e) {
// Semantic error not possible. Must be a bug. Convert to
// internal error.
Modified:
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveRelFieldTrimmer.java
URL:
http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveRelFieldTrimmer.java?rev=1613342&r1=1613341&r2=1613342&view=diff
==============================================================================
---
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveRelFieldTrimmer.java
(original)
+++
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveRelFieldTrimmer.java
Fri Jul 25 03:05:03 2014
@@ -547,7 +547,7 @@ public class HiveRelFieldTrimmer impleme
RexNode newConditionExpr = conditionExpr.accept(shuttle);
final HiveJoinRel newJoin = join.copy(join.getTraitSet(), newConditionExpr,
- newInputs.get(0), newInputs.get(1), join.getJoinType());
+ newInputs.get(0), newInputs.get(1), join.getJoinType(), false);
return new TrimResult(newJoin, mapping);
}
Modified:
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java
URL:
http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java?rev=1613342&r1=1613341&r2=1613342&view=diff
==============================================================================
---
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java
(original)
+++
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RelNodeConverter.java
Fri Jul 25 03:05:03 2014
@@ -73,6 +73,7 @@ import org.eigenbase.reltype.RelDataType
import org.eigenbase.rex.RexCall;
import org.eigenbase.rex.RexInputRef;
import org.eigenbase.rex.RexNode;
+import org.eigenbase.rex.RexUtil;
import org.eigenbase.sql.fun.SqlStdOperatorTable;
import org.eigenbase.util.CompositeList;
import org.eigenbase.util.Pair;
@@ -83,11 +84,15 @@ import com.google.common.collect.Lists;
public class RelNodeConverter {
private static final Map<String, Aggregation> AGG_MAP = ImmutableMap
- .<String, Aggregation> builder()
- .put("count", (Aggregation) SqlStdOperatorTable.COUNT)
- .put("sum", SqlStdOperatorTable.SUM).put("min", SqlStdOperatorTable.MIN)
- .put("max", SqlStdOperatorTable.MAX).put("avg", SqlStdOperatorTable.AVG)
- .build();
+ .<String,
Aggregation> builder()
+ .put(
+ "count",
+ (Aggregation)
SqlStdOperatorTable.COUNT)
+ .put("sum",
SqlStdOperatorTable.SUM)
+ .put("min",
SqlStdOperatorTable.MIN)
+ .put("max",
SqlStdOperatorTable.MAX)
+ .put("avg",
SqlStdOperatorTable.AVG)
+ .build();
public static RelNode convert(Operator<? extends OperatorDesc> sinkOp,
RelOptCluster cluster,
RelOptSchema schema, SemanticAnalyzer sA, ParseContext pCtx) {
@@ -228,13 +233,16 @@ public class RelNodeConverter {
opPositionMap.put(node, opPositionMap.get(parent));
}
- RexNode convertToOptiqExpr(final ExprNodeDesc expr, final RelNode optiqOP,
final boolean flatten) throws SemanticException {
+ RexNode convertToOptiqExpr(final ExprNodeDesc expr, final RelNode optiqOP,
final boolean flatten)
+ throws SemanticException {
return convertToOptiqExpr(expr, optiqOP, 0, flatten);
}
- RexNode convertToOptiqExpr(final ExprNodeDesc expr, final RelNode optiqOP,
int offset, final boolean flatten) throws SemanticException {
+ RexNode convertToOptiqExpr(final ExprNodeDesc expr, final RelNode optiqOP,
int offset,
+ final boolean flatten) throws SemanticException {
ImmutableMap<String, Integer> posMap = opPositionMap.get(optiqOP);
- RexNodeConverter c = new RexNodeConverter(cluster, optiqOP.getRowType(),
posMap, offset, flatten);
+ RexNodeConverter c = new RexNodeConverter(cluster, optiqOP.getRowType(),
posMap, offset,
+ flatten);
return c.convert(expr);
}
@@ -347,7 +355,8 @@ public class RelNodeConverter {
}
}
- joinRel = HiveJoinRel.getJoin(ctx.cluster, leftRel, rightRel,
joinPredicate, joinType, false);
+ joinRel = HiveJoinRel.getJoin(ctx.cluster, leftRel, rightRel,
joinPredicate, joinType,
+ false);
} else {
throw new RuntimeException("Right & Left of Join Condition columns are
not equal");
}
@@ -405,15 +414,15 @@ public class RelNodeConverter {
Context ctx = (Context) procCtx;
HiveRel input = (HiveRel) ctx.getParentNode((Operator<? extends
OperatorDesc>) nd, 0);
FilterOperator filterOp = (FilterOperator) nd;
- RexNode convertedFilterExpr = ctx
- .convertToOptiqExpr(filterOp.getConf().getPredicate(), input, true);
+ RexNode convertedFilterExpr =
ctx.convertToOptiqExpr(filterOp.getConf().getPredicate(),
+ input, true);
// Flatten the condition otherwise Optiq chokes on assertion
// (FilterRelBase)
if (convertedFilterExpr instanceof RexCall) {
RexCall call = (RexCall) convertedFilterExpr;
- convertedFilterExpr =
ctx.cluster.getRexBuilder().makeFlatCall(call.getOperator(),
- call.getOperands());
+ convertedFilterExpr =
ctx.cluster.getRexBuilder().makeCall(call.getType(),
+ call.getOperator(), RexUtil.flatten(call.getOperands(),
call.getOperator()));
}
HiveRel filtRel = new HiveFilterRel(ctx.cluster,
ctx.cluster.traitSetOf(HiveRel.CONVENTION),
@@ -553,7 +562,7 @@ public class RelNodeConverter {
/*
* numReducers == 1 and order.length = 1 => a RS for CrossJoin.
*/
- if ( order.length() == 0 ) {
+ if (order.length() == 0) {
Operator<? extends OperatorDesc> op = (Operator<? extends
OperatorDesc>) nd;
ctx.hiveOpToRelNode.put(op, input);
return input;
@@ -609,13 +618,12 @@ public class RelNodeConverter {
TableScanOperator tableScanOp = (TableScanOperator) nd;
RowResolver rr = ctx.sA.getRowResolver(tableScanOp);
- List<String> neededCols = new ArrayList<String>(
- tableScanOp.getNeededColumns());
+ List<String> neededCols = new
ArrayList<String>(tableScanOp.getNeededColumns());
Statistics stats = tableScanOp.getStatistics();
try {
- stats = addPartitionColumns(ctx, tableScanOp, tableScanOp.getConf()
- .getAlias(), ctx.sA.getTable(tableScanOp), stats, neededCols);
+ stats = addPartitionColumns(ctx, tableScanOp,
tableScanOp.getConf().getAlias(),
+ ctx.sA.getTable(tableScanOp), stats, neededCols);
} catch (CloneNotSupportedException ce) {
throw new SemanticException(ce);
}
@@ -637,9 +645,8 @@ public class RelNodeConverter {
/*
* Add partition columns to needed columns and fake the COlStats for it.
*/
- private Statistics addPartitionColumns(Context ctx,
- TableScanOperator tableScanOp, String tblAlias, Table tbl,
- Statistics stats, List<String> neededCols)
+ private Statistics addPartitionColumns(Context ctx, TableScanOperator
tableScanOp,
+ String tblAlias, Table tbl, Statistics stats, List<String> neededCols)
throws CloneNotSupportedException {
if (!tbl.isPartitioned()) {
return stats;
@@ -648,11 +655,9 @@ public class RelNodeConverter {
List<FieldSchema> pCols = tbl.getPartCols();
for (FieldSchema pC : pCols) {
neededCols.add(pC.getName());
- ColStatistics cStats = stats.getColumnStatisticsForColumn(tblAlias,
- pC.getName());
+ ColStatistics cStats = stats.getColumnStatisticsForColumn(tblAlias,
pC.getName());
if (cStats == null) {
- PrunedPartitionList partList = ctx.parseCtx.getOpToPartList().get(
- tableScanOp);
+ PrunedPartitionList partList =
ctx.parseCtx.getOpToPartList().get(tableScanOp);
cStats = new ColStatistics(tblAlias, pC.getName(), pC.getType());
cStats.setCountDistint(partList.getPartitions().size());
pStats.add(cStats);
Modified:
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java
URL:
http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java?rev=1613342&r1=1613341&r2=1613342&view=diff
==============================================================================
---
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java
(original)
+++
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java
Fri Jul 25 03:05:03 2014
@@ -37,6 +37,7 @@ import org.eigenbase.reltype.RelDataType
import org.eigenbase.rex.RexBuilder;
import org.eigenbase.rex.RexCall;
import org.eigenbase.rex.RexNode;
+import org.eigenbase.rex.RexUtil;
import org.eigenbase.sql.SqlOperator;
import org.eigenbase.sql.fun.SqlCastFunction;
import org.eigenbase.sql.type.SqlTypeName;
@@ -126,14 +127,17 @@ public class RexNodeConverter {
// This is an explicit cast
RexNode expr = null;
+ RelDataType retType = null;
expr = handleExplicitCast(func, childRexNodeLst);
if (expr == null) {
- RelDataType retType = (expr != null) ? expr.getType() :
TypeConverter.convert(
- func.getTypeInfo(), m_cluster.getTypeFactory());
+ retType = (expr != null) ? expr.getType() :
TypeConverter.convert(func.getTypeInfo(),
+ m_cluster.getTypeFactory());
SqlOperator optiqOp =
SqlFunctionConverter.getOptiqOperator(func.getGenericUDF(),
argTypeBldr.build(), retType);
expr = m_cluster.getRexBuilder().makeCall(optiqOp, childRexNodeLst);
+ } else {
+ retType = expr.getType();
}
// TODO: Cast Function in Optiq have a bug where it infertype on cast
throws
@@ -141,7 +145,8 @@ public class RexNodeConverter {
if (m_flattenExpr && (expr instanceof RexCall)
&& !(((RexCall) expr).getOperator() instanceof SqlCastFunction)) {
RexCall call = (RexCall) expr;
- expr = m_cluster.getRexBuilder().makeFlatCall(call.getOperator(),
call.getOperands());
+ expr = m_cluster.getRexBuilder().makeCall(retType, call.getOperator(),
+ RexUtil.flatten(call.getOperands(), call.getOperator()));
}
return expr;
Modified:
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL:
http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1613342&r1=1613341&r2=1613342&view=diff
==============================================================================
---
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
(original)
+++
hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
Fri Jul 25 03:05:03 2014
@@ -223,7 +223,6 @@ import org.apache.hadoop.hive.serde2.typ
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.InputFormat;
-
import org.eigenbase.rel.AggregateCall;
import org.eigenbase.rel.Aggregation;
import org.eigenbase.rel.InvalidRelException;
@@ -235,13 +234,17 @@ import org.eigenbase.rel.RelNode;
import org.eigenbase.rel.metadata.CachingRelMetadataProvider;
import org.eigenbase.rel.metadata.ChainedRelMetadataProvider;
import org.eigenbase.rel.metadata.RelMetadataProvider;
+import org.eigenbase.rel.rules.ConvertMultiJoinRule;
+import org.eigenbase.rel.rules.LoptOptimizeJoinRule;
import org.eigenbase.relopt.RelOptCluster;
import org.eigenbase.relopt.RelOptPlanner;
import org.eigenbase.relopt.RelOptQuery;
import org.eigenbase.relopt.RelOptRule;
import org.eigenbase.relopt.RelOptSchema;
import org.eigenbase.relopt.RelTraitSet;
+import org.eigenbase.relopt.hep.HepMatchOrder;
import org.eigenbase.relopt.hep.HepPlanner;
+import org.eigenbase.relopt.hep.HepProgram;
import org.eigenbase.relopt.hep.HepProgramBuilder;
import org.eigenbase.reltype.RelDataType;
import org.eigenbase.reltype.RelDataTypeFactory;
@@ -254,6 +257,7 @@ import org.eigenbase.util.CompositeList;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
/**
@@ -11801,13 +11805,14 @@ public class SemanticAnalyzer extends Ba
}
@Override
- public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema,
- SchemaPlus rootSchema) {
- RelOptPlanner planner = HiveVolcanoPlanner.createPlanner();
-
+ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema,
SchemaPlus rootSchema) {
+ RelNode optiqGenPlan = null;
+ RelNode optiqPreCboPlan = null;
+ RelNode optiqOptimizedPlan = null;
/*
* recreate cluster, so that it picks up the additional traitDef
*/
+ RelOptPlanner planner = HiveVolcanoPlanner.createPlanner();
final RelOptQuery query = new RelOptQuery(planner);
final RexBuilder rexBuilder = cluster.getRexBuilder();
cluster = query.createCluster(rexBuilder.getTypeFactory(), rexBuilder);
@@ -11816,46 +11821,59 @@ public class SemanticAnalyzer extends Ba
m_relOptSchema = relOptSchema;
m_rootSchema = rootSchema;
- RelNode optiqPlan = null;
try {
- optiqPlan = genLogicalPlan(qb);
+ optiqGenPlan = genLogicalPlan(qb);
} catch (SemanticException e) {
m_semanticException = e;
throw new RuntimeException(e);
}
- optiqPlan = applyPreCBOTransforms(optiqPlan,
- HiveDefaultRelMetadataProvider.INSTANCE);
-
+ optiqPreCboPlan = applyPreCBOTransforms(optiqGenPlan,
HiveDefaultRelMetadataProvider.INSTANCE);
List<RelMetadataProvider> list = Lists.newArrayList();
list.add(HiveDefaultRelMetadataProvider.INSTANCE);
- planner.registerMetadataProviders(list);
- RelMetadataProvider chainedProvider =
ChainedRelMetadataProvider.of(list);
- cluster.setMetadataProvider(new CachingRelMetadataProvider(
- chainedProvider, planner));
+ if (!HiveConf.getBoolVar(conf,
HiveConf.ConfVars.HIVE_CBO_GREEDY_JOIN_ORDER)) {
+ planner.registerMetadataProviders(list);
+
+ RelMetadataProvider chainedProvider =
ChainedRelMetadataProvider.of(list);
+ cluster.setMetadataProvider(new
CachingRelMetadataProvider(chainedProvider, planner));
- planner.addRule(HiveSwapJoinRule.INSTANCE);
- planner.addRule(HivePushJoinThroughJoinRule.LEFT);
- planner.addRule(HivePushJoinThroughJoinRule.RIGHT);
- if (HiveConf.getBoolVar(conf,
- HiveConf.ConfVars.HIVE_CBO_PULLPROJECTABOVEJOIN_RULE)) {
- planner.addRule(HivePullUpProjectsAboveJoinRule.BOTH_PROJECT);
- planner.addRule(HivePullUpProjectsAboveJoinRule.LEFT_PROJECT);
- planner.addRule(HivePullUpProjectsAboveJoinRule.RIGHT_PROJECT);
- planner.addRule(HiveMergeProjectRule.INSTANCE);
- }
-
- RelTraitSet desiredTraits = cluster.traitSetOf(HiveRel.CONVENTION,
- RelCollationImpl.EMPTY);
-
- RelNode rootRel = optiqPlan;
- if (!optiqPlan.getTraitSet().equals(desiredTraits)) {
- rootRel = planner.changeTraits(optiqPlan, desiredTraits);
+ planner.addRule(HiveSwapJoinRule.INSTANCE);
+ planner.addRule(HivePushJoinThroughJoinRule.LEFT);
+ planner.addRule(HivePushJoinThroughJoinRule.RIGHT);
+ if (HiveConf.getBoolVar(conf,
HiveConf.ConfVars.HIVE_CBO_PULLPROJECTABOVEJOIN_RULE)) {
+ planner.addRule(HivePullUpProjectsAboveJoinRule.BOTH_PROJECT);
+ planner.addRule(HivePullUpProjectsAboveJoinRule.LEFT_PROJECT);
+ planner.addRule(HivePullUpProjectsAboveJoinRule.RIGHT_PROJECT);
+ planner.addRule(HiveMergeProjectRule.INSTANCE);
+
+ RelTraitSet desiredTraits = cluster
+ .traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY);
+
+ RelNode rootRel = optiqPreCboPlan;
+ if (!optiqPreCboPlan.getTraitSet().equals(desiredTraits)) {
+ rootRel = planner.changeTraits(optiqPreCboPlan, desiredTraits);
+ }
+ planner.setRoot(rootRel);
+
+ optiqOptimizedPlan = planner.findBestExp();
+ }
+ } else {
+ final HepProgram hepPgm = new
HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP)
+ .addRuleInstance(new ConvertMultiJoinRule(HiveJoinRel.class))
+ .addRuleInstance(LoptOptimizeJoinRule.INSTANCE).build();
+
+ HepPlanner hepPlanner = new HepPlanner(hepPgm);
+
+ hepPlanner.registerMetadataProviders(list);
+ RelMetadataProvider chainedProvider =
ChainedRelMetadataProvider.of(list);
+ cluster.setMetadataProvider(new
CachingRelMetadataProvider(chainedProvider, hepPlanner));
+
+ hepPlanner.setRoot(optiqPreCboPlan);
+ optiqOptimizedPlan = hepPlanner.findBestExp();
}
- planner.setRoot(rootRel);
- return planner.findBestExp();
+ return optiqOptimizedPlan;
}
public RelNode applyPreCBOTransforms(RelNode basePlan,