HIVE-19237: Only use an operatorId once in a plan (Zoltan Haindrich reviewed by Ashutosh Chauhan)

Signed-off-by: Zoltan Haindrich <k...@rxd.hu>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e8f283cf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e8f283cf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e8f283cf

Branch: refs/heads/branch-3
Commit: e8f283cf198953b4c464b044c9aac7f70a22bd15
Parents: d0e3c19
Author: Zoltan Haindrich <k...@rxd.hu>
Authored: Wed Jun 13 13:41:15 2018 +0200
Committer: Zoltan Haindrich <k...@rxd.hu>
Committed: Wed Jun 13 13:41:15 2018 +0200

----------------------------------------------------------------------
 data/conf/llap/hive-site.xml                    |    2 +-
 .../java/org/apache/hadoop/hive/ql/Context.java |    5 +
 .../apache/hadoop/hive/ql/exec/Operator.java    |   23 +-
 .../hadoop/hive/ql/exec/OperatorUtils.java      |    4 +-
 .../hive/ql/exec/SerializationUtilities.java    |   14 +-
 .../ql/hooks/NoOperatorReuseCheckerHook.java    |  109 ++
 .../ql/optimizer/physical/MemoryDecider.java    |   12 +-
 .../ql/optimizer/physical/SerializeFilter.java  |    6 +-
 .../ql/parse/ColumnStatsAutoGatherContext.java  |    1 +
 .../hive/ql/parse/spark/SplitOpTreeForDPP.java  |    7 +-
 .../hadoop/hive/ql/plan/MergeJoinWork.java      |    7 +-
 .../ql/plan/mapping/TestCounterMapping.java     |    8 +-
 .../hive/ql/plan/mapping/TestOperatorCmp.java   |   16 +-
 .../results/clientpositive/auto_join0.q.out     |    8 +-
 .../cbo_rp_cross_product_check_2.q.out          |    4 +-
 .../clientpositive/cross_product_check_2.q.out  |    4 +-
 .../llap/bucketizedhiveinputformat.q.out        |    2 +-
 .../clientpositive/llap/constprog_dpp.q.out     |   28 +-
 .../clientpositive/llap/dp_counter_mm.q.out     |  172 +-
 .../clientpositive/llap/dp_counter_non_mm.q.out |  172 +-
 .../llap/dynamic_semijoin_user_level.q.out      |   16 +-
 .../clientpositive/llap/explainanalyze_2.q.out  |  672 +++----
 .../clientpositive/llap/explainuser_1.q.out     |  128 +-
 .../clientpositive/llap/explainuser_2.q.out     | 1840 +++++++++---------
 .../results/clientpositive/llap/lineage3.q.out  |    9 +-
 .../results/clientpositive/llap/orc_llap.q.out  |    4 +-
 .../llap/table_access_keys_stats.q.out          |    2 +-
 .../llap/tez_input_counters.q.out               |   14 +-
 .../clientpositive/llap/union_fast_stats.q.out  |  624 +++---
 .../test/results/clientpositive/mapjoin47.q.out |    4 +-
 .../clientpositive/perf/spark/query77.q.out     |    2 +-
 .../clientpositive/perf/tez/query14.q.out       |  746 +++----
 .../clientpositive/perf/tez/query2.q.out        |   52 +-
 .../clientpositive/perf/tez/query23.q.out       |  196 +-
 .../clientpositive/perf/tez/query33.q.out       |  106 +-
 .../clientpositive/perf/tez/query38.q.out       |  106 +-
 .../clientpositive/perf/tez/query49.q.out       |  154 +-
 .../clientpositive/perf/tez/query5.q.out        |  140 +-
 .../clientpositive/perf/tez/query54.q.out       |  160 +-
 .../clientpositive/perf/tez/query56.q.out       |  106 +-
 .../clientpositive/perf/tez/query60.q.out       |  106 +-
 .../clientpositive/perf/tez/query66.q.out       |   96 +-
 .../clientpositive/perf/tez/query71.q.out       |   86 +-
 .../clientpositive/perf/tez/query75.q.out       |  236 +--
 .../clientpositive/perf/tez/query76.q.out       |   96 +-
 .../clientpositive/perf/tez/query77.q.out       |  168 +-
 .../clientpositive/perf/tez/query8.q.out        |   84 +-
 .../clientpositive/perf/tez/query80.q.out       |  162 +-
 .../clientpositive/perf/tez/query87.q.out       |  132 +-
 .../results/clientpositive/smb_mapjoin_25.q.out |   16 +-
 .../results/clientpositive/smb_mapjoin_47.q.out |    4 +-
 .../spark/spark_explainuser_1.q.out             |   64 +-
 .../clientpositive/subquery_multiinsert.q.out   |   12 +-
 .../clientpositive/tez/explainanalyze_1.q.out   |   36 +-
 .../clientpositive/tez/explainanalyze_3.q.out   |   16 +-
 .../clientpositive/tez/explainanalyze_5.q.out   |   28 +-
 .../clientpositive/tez/explainuser_3.q.out      |   12 +-
 .../clientpositive/vector_groupby_mapjoin.q.out |    8 +-
 58 files changed, 3623 insertions(+), 3424 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/data/conf/llap/hive-site.xml
----------------------------------------------------------------------
diff --git a/data/conf/llap/hive-site.xml b/data/conf/llap/hive-site.xml
index 1507a56..44ca6c9 100644
--- a/data/conf/llap/hive-site.xml
+++ b/data/conf/llap/hive-site.xml
@@ -163,7 +163,7 @@
 
 <property>
   <name>hive.exec.post.hooks</name>
-  <value>org.apache.hadoop.hive.ql.hooks.PostExecutePrinter, org.apache.hadoop.hive.ql.hooks.RuntimeStatsPersistenceCheckerHook</value>
+  <value>org.apache.hadoop.hive.ql.hooks.PostExecutePrinter, org.apache.hadoop.hive.ql.hooks.RuntimeStatsPersistenceCheckerHook, org.apache.hadoop.hive.ql.hooks.NoOperatorReuseCheckerHook</value>
   <description>Post Execute Hook for Tests</description>
 </property>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/java/org/apache/hadoop/hive/ql/Context.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index 9eda4ed..e4e3d48 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -1026,6 +1026,10 @@ public class Context {
     return opContext;
   }
 
+  public void setOpContext(CompilationOpContext opContext) {
+    this.opContext = opContext;
+  }
+
   public Heartbeater getHeartbeater() {
     return heartbeater;
   }
@@ -1124,4 +1128,5 @@ public class Context {
   public void setTempTableForLoad(Table tempTableForLoad) {
     this.tempTableForLoad = tempTableForLoad;
   }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index 4e9784d..acadb43 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -89,6 +89,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   protected int indexForTezUnion = -1;
   private transient Configuration hconf;
 protected final transient Collection<Future<?>> asyncInitOperations = new HashSet<>();
+  private String marker;
 
   protected int bucketingVersion = -1;
 // It can be optimized later so that an operator operator (init/close) is performed
@@ -134,6 +135,7 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
     initOperatorId();
   }
 
+  /** Kryo ctor. */
   protected Operator() {
     childOperators = new ArrayList<Operator<? extends OperatorDesc>>();
     parentOperators = new ArrayList<Operator<? extends OperatorDesc>>();
@@ -244,10 +246,6 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   protected transient ObjectInspector outputObjInspector;
 
 
-  public void setId(String id) {
-    this.id = id;
-  }
-
   /**
    * This function is not named getId(), to make sure java serialization does
    * NOT serialize it. Some TestParse tests will fail if we serialize this
@@ -1167,12 +1165,16 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
     return operatorId;
   }
 
-  public void initOperatorId() {
-    setOperatorId(getName() + "_" + this.id);
+  public String getMarker() {
+    return marker;
   }
 
-  public void setOperatorId(String operatorId) {
-    this.operatorId = operatorId;
+  public void setMarker(String marker) {
+    this.marker = marker;
+  }
+
+  public void initOperatorId() {
+    this.operatorId = getName() + "_" + this.id;
   }
 
   /*
@@ -1539,7 +1541,12 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
   }
 
   public void setCompilationOpContext(CompilationOpContext ctx) {
+    if (cContext == ctx) {
+      return;
+    }
     cContext = ctx;
+    id = String.valueOf(ctx.nextOperatorId());
+    initOperatorId();
   }
 
   /** @return Compilation operator context. Only available during compilation. */

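A minimal sketch (not part of the patch) of the id-assignment contract these Operator.java changes establish, assuming the stock OperatorFactory/CompilationOpContext APIs; the printed ids are illustrative. Every operator draws its numeric id from the context it is attached to, re-attaching the same context is a no-op, and the setId()/setOperatorId() mutators are gone, so an id can no longer be handed out twice within one compilation:

    import org.apache.hadoop.hive.ql.CompilationOpContext;
    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.exec.OperatorFactory;
    import org.apache.hadoop.hive.ql.plan.SelectDesc;

    public class UniqueOperatorIdSketch {
      public static void main(String[] args) {
        CompilationOpContext ctx = new CompilationOpContext();
        Operator<SelectDesc> a = OperatorFactory.get(ctx, new SelectDesc());
        Operator<SelectDesc> b = OperatorFactory.get(ctx, new SelectDesc());
        System.out.println(a.getOperatorId()); // e.g. SEL_0
        System.out.println(b.getOperatorId()); // e.g. SEL_1, never a repeat

        // the new guard in setCompilationOpContext: re-attaching the same
        // context leaves id and operatorId untouched; only a *different*
        // context (what a deserialized clone sees) draws a fresh id
        a.setCompilationOpContext(ctx);
        System.out.println(a.getOperatorId()); // still SEL_0
      }
    }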
http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
index 4634928..7b2ae40 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
@@ -427,12 +427,12 @@ public class OperatorUtils {
     return matchingOps;
   }
 
-  public static Operator<?> findOperatorById(Operator<?> start, String opId) {
+  public static Operator<?> findOperatorByMarker(Operator<?> start, String marker) {
     Deque<Operator<?>> queue = new ArrayDeque<>();
     queue.add(start);
     while (!queue.isEmpty()) {
       Operator<?> op = queue.remove();
-      if (op.getOperatorId().equals(opId)) {
+      if (marker.equals(op.getMarker())) {
         return op;
       }
       if (op.getChildOperators() != null) {

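findOperatorByMarker replaces the id-based lookup precisely because operatorIds are what this patch stops keeping stable across cloning; the marker is an explicit, caller-owned tag that survives instead. A small sketch of the contract (assumes the usual OperatorFactory helpers; the marker strings are made up):

    import org.apache.hadoop.hive.ql.CompilationOpContext;
    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.exec.OperatorFactory;
    import org.apache.hadoop.hive.ql.exec.OperatorUtils;
    import org.apache.hadoop.hive.ql.plan.FilterDesc;
    import org.apache.hadoop.hive.ql.plan.SelectDesc;

    public class FindByMarkerSketch {
      public static void main(String[] args) {
        CompilationOpContext ctx = new CompilationOpContext();
        Operator<SelectDesc> root = OperatorFactory.get(ctx, new SelectDesc());
        Operator<FilterDesc> child =
            OperatorFactory.getAndMakeChild(new FilterDesc(), root);

        child.setMarker("BRANCH_POINT");
        // breadth-first walk from root through child operators; the first
        // operator whose marker equals the argument wins, else null
        Operator<?> hit = OperatorUtils.findOperatorByMarker(root, "BRANCH_POINT");
        System.out.println(hit == child);                               // true
        System.out.println(OperatorUtils.findOperatorByMarker(root, "NOPE")); // null
      }
    }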
http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
index 66f0a00..ed1566f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.exec;
 
-import java.util.LinkedList;
-
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.InputStream;
@@ -30,8 +28,10 @@ import java.lang.reflect.Field;
 import java.net.URI;
 import java.sql.Timestamp;
 import java.time.ZoneId;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
@@ -39,16 +39,14 @@ import java.util.Properties;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.type.TimestampTZ;
 import org.apache.hadoop.hive.common.CopyOnFirstWriteProperties;
+import org.apache.hadoop.hive.common.type.TimestampTZ;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.exec.vector.VectorFileSinkOperator;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
 import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
@@ -225,6 +223,7 @@ public class SerializationUtilities {
   private static final Object FAKE_REFERENCE = new Object();
 
   private static KryoFactory factory = new KryoFactory() {
+    @Override
     public Kryo create() {
       KryoWithHooks kryo = new KryoWithHooks();
       kryo.register(java.sql.Date.class, new SqlDateSerializer());
@@ -646,8 +645,11 @@ public class SerializationUtilities {
    * @return The clone.
    */
   public static List<Operator<?>> cloneOperatorTree(List<Operator<?>> roots) {
+    if (roots.isEmpty()) {
+      return new ArrayList<>();
+    }
     ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
-    CompilationOpContext ctx = roots.isEmpty() ? null : roots.get(0).getCompilationOpContext();
+    CompilationOpContext ctx = roots.get(0).getCompilationOpContext();
     serializePlan(roots, baos, true);
     @SuppressWarnings("unchecked")
     List<Operator<?>> result =

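cloneOperatorTree is where the reused ids used to originate: the Kryo round-trip preserved each clone's id, so original and clone carried the same operatorId. A sketch of the post-patch behavior (assumes clones get re-attached to the source tree's CompilationOpContext during deserialization, which now hands out fresh ids; the printed ids are illustrative):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hive.ql.CompilationOpContext;
    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.exec.OperatorFactory;
    import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
    import org.apache.hadoop.hive.ql.plan.SelectDesc;

    public class CloneOperatorTreeSketch {
      public static void main(String[] args) {
        CompilationOpContext ctx = new CompilationOpContext();
        List<Operator<?>> roots = new ArrayList<>();
        roots.add(OperatorFactory.get(ctx, new SelectDesc()));

        List<Operator<?>> clones = SerializationUtilities.cloneOperatorTree(roots);
        System.out.println(roots.get(0).getOperatorId());  // e.g. SEL_0
        System.out.println(clones.get(0).getOperatorId()); // e.g. SEL_1, not SEL_0

        // the new guard: an empty input returns an empty list up front
        // instead of doing a pointless Kryo round-trip with a null context
        System.out.println(
            SerializationUtilities.cloneOperatorTree(new ArrayList<>()).isEmpty());
      }
    }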
http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java
new file mode 100644
index 0000000..3fc5429
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.hooks;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
+import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
+import org.apache.hadoop.hive.ql.lib.Dispatcher;
+import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.Node;
+import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.BaseWork;
+import org.apache.hadoop.hive.ql.plan.MapWork;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.ReduceWork;
+import org.apache.hadoop.hive.ql.plan.TezWork;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Checks that operator ids are not reused.
+ */
+public class NoOperatorReuseCheckerHook implements ExecuteWithHookContext {
+
+  private static final Logger LOG = LoggerFactory.getLogger(NoOperatorReuseCheckerHook.class);
+
+  static class UniqueOpIdChecker implements NodeProcessor {
+
+    Map<String, Operator<?>> opMap = new HashMap<>();
+
+    @Override
+    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs)
+        throws SemanticException {
+      Operator op = (Operator) nd;
+      String opKey = op.getOperatorId();
+      Operator<?> found = opMap.get(opKey);
+      if (found != null) {
+        throw new RuntimeException("operator id reuse found: " + opKey);
+      }
+      opMap.put(opKey, op);
+      return null;
+    }
+  }
+
+  @Override
+  public void run(HookContext hookContext) throws Exception {
+
+    List<Node> rootOps = Lists.newArrayList();
+
+    ArrayList<Task<? extends Serializable>> roots = hookContext.getQueryPlan().getRootTasks();
+    for (Task<? extends Serializable> task : roots) {
+
+      Object work = task.getWork();
+      if (work instanceof MapredWork) {
+        MapredWork mapredWork = (MapredWork) work;
+        MapWork mapWork = mapredWork.getMapWork();
+        if (mapWork != null) {
+          rootOps.addAll(mapWork.getAllRootOperators());
+        }
+        ReduceWork reduceWork = mapredWork.getReduceWork();
+        if (reduceWork != null) {
+          rootOps.addAll(reduceWork.getAllRootOperators());
+        }
+      }
+      if (work instanceof TezWork) {
+        for (BaseWork bw : ((TezWork) work).getAllWorkUnsorted()) {
+          rootOps.addAll(bw.getAllRootOperators());
+        }
+      }
+    }
+    if (rootOps.isEmpty()) {
+      return;
+    }
+
+    Dispatcher disp = new DefaultRuleDispatcher(new UniqueOpIdChecker(), new HashMap<>(), null);
+    GraphWalker ogw = new DefaultGraphWalker(disp);
+
+    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
+    ogw.startWalking(rootOps, nodeOutput);
+
+  }
+}

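The hook plugs into the standard post-execution hook mechanism. Besides the hive-site.xml wiring at the top of this patch, it can be enabled programmatically; a sketch, assuming a session-level HiveConf:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

    public class EnableNoReuseCheckerSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // same effect as the hive-site.xml change above: any query whose
        // final MR/Tez plan contains two operators sharing an operatorId
        // now fails with "operator id reuse found: <operatorId>"
        conf.setVar(ConfVars.POSTEXECHOOKS,
            "org.apache.hadoop.hive.ql.hooks.NoOperatorReuseCheckerHook");
      }
    }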
http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java
index bf7a644..64f1e7b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java
@@ -20,21 +20,19 @@ package org.apache.hadoop.hive.ql.optimizer.physical;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Comparator;
-import java.util.Iterator;
 import java.util.HashMap;
-import java.util.Map;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
+import java.util.Map;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.Stack;
 import java.util.TreeSet;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
+import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.tez.TezTask;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
@@ -54,6 +52,8 @@ import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.MergeJoinWork;
 import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.TezWork;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * MemoryDecider is a simple physical optimizer that adjusts the memory layout of tez tasks.
@@ -129,7 +129,7 @@ public class MemoryDecider implements PhysicalPlanResolver {
       Dispatcher disp = null;
       final Set<MapJoinOperator> mapJoins = new LinkedHashSet<MapJoinOperator>();
 
-      Map<Rule, NodeProcessor> rules = new HashMap<Rule, NodeProcessor>();
+      LinkedHashMap<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
       rules.put(new RuleRegExp("Map join memory estimator",
               MapJoinOperator.getOperatorName() + "%"), new NodeProcessor() {
           @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java
index 6a0ca5d..9224350 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SerializeFilter.java
@@ -19,15 +19,13 @@ package org.apache.hadoop.hive.ql.optimizer.physical;
 
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
-import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
+import org.apache.hadoop.hive.ql.exec.StatsTask;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.tez.TezTask;
@@ -115,7 +113,7 @@ public class SerializeFilter implements PhysicalPlanResolver {
       Dispatcher disp = null;
       final Set<TableScanOperator> tableScans = new LinkedHashSet<TableScanOperator>();
 
-      Map<Rule, NodeProcessor> rules = new HashMap<Rule, NodeProcessor>();
+      LinkedHashMap<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
       rules.put(new RuleRegExp("TS finder",
               TableScanOperator.getOperatorName() + "%"), new NodeProcessor() {
           @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java
index fe26283..de0282f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsAutoGatherContext.java
@@ -127,6 +127,7 @@ public class ColumnStatsAutoGatherContext {
   private Operator genSelOpForAnalyze(String analyzeCommand, Context origCtx) throws IOException, ParseException, SemanticException{
     //0. initialization
     Context ctx = new Context(conf);
+    ctx.setOpContext(origCtx.getOpContext());
     ctx.setExplainConfig(origCtx.getExplainConfig());
     ASTNode tree = ParseUtils.parse(analyzeCommand, ctx);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SplitOpTreeForDPP.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SplitOpTreeForDPP.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SplitOpTreeForDPP.java
index b0d7eec..b9c2c1c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SplitOpTreeForDPP.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SplitOpTreeForDPP.java
@@ -25,7 +25,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.Stack;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorUtils;
 import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
@@ -37,6 +36,8 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor;
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 
+import com.google.common.base.Preconditions;
+
 
 /**
  * This processor triggers on SparkPartitionPruningSinkOperator. For an operator tree like
@@ -100,6 +101,8 @@ public class SplitOpTreeForDPP implements NodeProcessor {
     collectRoots(roots, pruningSinkOp);
 
     Operator<?> branchingOp = pruningSinkOp.getBranchingOp();
+    String marker = "SPARK_DPP_BRANCH_POINT_" + branchingOp.getOperatorId();
+    branchingOp.setMarker(marker);
     List<Operator<?>> savedChildOps = branchingOp.getChildOperators();
     List<Operator<?>> firstNodesOfPruningBranch = findFirstNodesOfPruningBranch(branchingOp);
     branchingOp.setChildOperators(null);
@@ -115,7 +118,7 @@ public class SplitOpTreeForDPP implements NodeProcessor {
 
     Operator newBranchingOp = null;
     for (int i = 0; i < newRoots.size() && newBranchingOp == null; i++) {
-      newBranchingOp = OperatorUtils.findOperatorById(newRoots.get(i), branchingOp.getOperatorId());
+      newBranchingOp = OperatorUtils.findOperatorByMarker(newRoots.get(i), marker);
     }
     Preconditions.checkNotNull(newBranchingOp,
         "Cannot find the branching operator in cloned tree.");

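Why a marker is needed here: the tree produced by cloneOperatorTree now carries fresh operator ids, so the old lookup by branchingOp.getOperatorId() would miss the clone of the branching operator. The caller tags the branch point before cloning and matches the tag afterwards; condensed from the hunk above (pruningSinkOp, roots and newRoots come from the surrounding processor):

    Operator<?> branchingOp = pruningSinkOp.getBranchingOp();
    String marker = "SPARK_DPP_BRANCH_POINT_" + branchingOp.getOperatorId();
    branchingOp.setMarker(marker);          // survives the Kryo round-trip
    List<Operator<?>> newRoots = SerializationUtilities.cloneOperatorTree(roots);
    Operator newBranchingOp = null;
    for (int i = 0; i < newRoots.size() && newBranchingOp == null; i++) {
      // match on the stable marker, not on the (reassigned) operatorId
      newBranchingOp = OperatorUtils.findOperatorByMarker(newRoots.get(i), marker);
    }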
http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java
index 24ce898..45f3347 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.util.ArrayList;
-import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -49,7 +49,7 @@ public class MergeJoinWork extends BaseWork {
 
   @Override
   public Set<Operator<?>> getAllRootOperators() {
-    Set<Operator<?>> set = new HashSet<>();
+    Set<Operator<?>> set = new LinkedHashSet<>();
     set.addAll(getMainWork().getAllRootOperators());
     for (BaseWork w : mergeWorkList) {
       set.addAll(w.getAllRootOperators());
@@ -92,7 +92,7 @@ public class MergeJoinWork extends BaseWork {
          * output name in the reduce sink needs to be setup appropriately. In the case of reduce
          * side merge work, we need to ensure that the parent work that provides data to this merge
          * work is setup to point to the right vertex name - the main work name.
-         * 
+         *
          * In this case, if the big table work has already been created, we can hook up the merge
          * work items for the small table correctly.
          */
@@ -176,6 +176,7 @@ public class MergeJoinWork extends BaseWork {
     return getMainWork().getLlapMode();
   }
 
+  @Override
   public void addDummyOp(HashTableDummyOperator dummyOp) {
     getMainWork().addDummyOp(dummyOp);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java
index e8a7a1b..b57b5dd 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java
@@ -62,7 +62,13 @@ public class TestCounterMapping {
 
     @Override
     public int compare(Operator<?> o1, Operator<?> o2) {
-      return Objects.compare(o1.getOperatorId(), o2.getOperatorId(), Comparator.naturalOrder());
+      Long id1 = Long.valueOf(o1.getIdentifier());
+      Long id2 = Long.valueOf(o2.getIdentifier());
+      int c0 = Objects.compare(o1.getOperatorName(), o2.getOperatorName(), Comparator.naturalOrder());
+      if (c0 != 0) {
+        return c0;
+      }
+      return Long.compare(id1, id2);
     }
   };
 

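Note the comparator rewrite changes the ordering semantics as well as the accessor: comparing operatorId strings orders multi-digit ids lexicographically, while the new (operator name, numeric identifier) key orders them numerically. A tiny illustration with plain strings, no Hive types involved:

    public class OrderingSketch {
      public static void main(String[] args) {
        // lexicographic: "SEL_10" sorts before "SEL_2", scrambling ids >= 10
        System.out.println("SEL_10".compareTo("SEL_2") < 0);  // true
        // numeric key, as in the comparator above: 2 < 10, SEL_2 sorts first
        System.out.println(Long.compare(2L, 10L) < 0);        // true
      }
    }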
http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java
index 3e3e0b0..87d06ae 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java
@@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import java.util.Iterator;
 import java.util.List;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -35,7 +34,6 @@ import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.optimizer.signature.TestOperatorSignature;
 import org.apache.hadoop.hive.ql.parse.ParseException;
 import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper;
-import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper.EquivGroup;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.stats.OperatorStatsReaderHook;
 import org.apache.hive.testutils.HiveTestEnvSetup;
@@ -102,15 +100,19 @@ public class TestOperatorCmp {
     String query = "select u from tu where id_uv = 1 union all select v from tv where id_uv = 1";
 
     PlanMapper pm = getMapperForQuery(driver, query);
-    Iterator<EquivGroup> itG = pm.iterateGroups();
     List<FilterOperator> fos = pm.getAll(FilterOperator.class);
     // the same operator is present 2 times
-    fos.sort(TestCounterMapping.OPERATOR_ID_COMPARATOR.reversed());
     assertEquals(4, fos.size());
 
-    assertTrue("logicalEquals", compareOperators(fos.get(0), fos.get(1)));
-    assertFalse("logicalEquals", compareOperators(fos.get(0), fos.get(2)));
-    assertTrue("logicalEquals", compareOperators(fos.get(2), fos.get(3)));
+    int cnt = 0;
+    for (int i = 0; i < 3; i++) {
+      for (int j = i + 1; j < 4; j++) {
+        if (compareOperators(fos.get(i), fos.get(j))) {
+          cnt++;
+        }
+      }
+    }
+    assertEquals(2, cnt);
 
   }
 
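Spelling out the rewritten assertion: it counts logically-equal pairs instead of probing fixed positions, removing any dependence on how getAll() orders the four filters. With 4 operators there are 4*3/2 = 6 unordered pairs, and since each union branch contributes the same filter twice ("the same operator is present 2 times"), exactly the two within-branch pairs should compare equal:

    // pair count over n = 4 operators: n*(n-1)/2 = 6 comparisons,
    // of which exactly 2 (one per union branch) are logically equal
    int n = 4;
    System.out.println(n * (n - 1) / 2);   // 6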

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/test/results/clientpositive/auto_join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join0.q.out b/ql/src/test/results/clientpositive/auto_join0.q.out
index da036b7..17fc2fd 100644
--- a/ql/src/test/results/clientpositive/auto_join0.q.out
+++ b/ql/src/test/results/clientpositive/auto_join0.q.out
@@ -1,5 +1,5 @@
-Warning: Map Join MAPJOIN[28][bigTable=?] in task 'Stage-5:MAPRED' is a cross product
-Warning: Map Join MAPJOIN[29][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[35][bigTable=?] in task 'Stage-5:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[43][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
 Warning: Shuffle Join JOIN[12][tables = [src1, src2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain 
 select sum(hash(a.k1,a.v1,a.k2, a.v2))
@@ -244,8 +244,8 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[28][bigTable=?] in task 'Stage-5:MAPRED' is a cross product
-Warning: Map Join MAPJOIN[29][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[35][bigTable=?] in task 'Stage-5:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[43][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
 Warning: Shuffle Join JOIN[12][tables = [src1, src2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: select sum(hash(a.k1,a.v1,a.k2, a.v2))
 from (

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
index 390a679..10a8ae1 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
@@ -460,8 +460,8 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[31][bigTable=?] in task 'Stage-7:MAPRED' is a cross product
-Warning: Map Join MAPJOIN[30][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[43][bigTable=?] in task 'Stage-7:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[36][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
 Warning: Shuffle Join JOIN[20][tables = [, ]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select * from 
 (select A_n18.key from A_n18 group by key) ss join 

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/test/results/clientpositive/cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cross_product_check_2.q.out
index 2c62cf0..6143431 100644
--- a/ql/src/test/results/clientpositive/cross_product_check_2.q.out
+++ b/ql/src/test/results/clientpositive/cross_product_check_2.q.out
@@ -452,8 +452,8 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[34][bigTable=?] in task 'Stage-7:MAPRED' is a cross product
-Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[46][bigTable=?] in task 'Stage-7:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[39][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
 Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select * from 
 (select A_n2.key from A_n2 group by key) ss join 

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/test/results/clientpositive/llap/bucketizedhiveinputformat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucketizedhiveinputformat.q.out b/ql/src/test/results/clientpositive/llap/bucketizedhiveinputformat.q.out
index 37a9675..d87e3a3 100644
--- a/ql/src/test/results/clientpositive/llap/bucketizedhiveinputformat.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucketizedhiveinputformat.q.out
@@ -22,7 +22,7 @@ POSTHOOK: query: CREATE TABLE T2_n74(name STRING) STORED AS SEQUENCEFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T2_n74
-Warning: Shuffle Join MERGEJOIN[16][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[23][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: INSERT OVERWRITE TABLE T2_n74 SELECT * FROM (
 SELECT tmp1.name as name FROM (
   SELECT name, 'MMM' AS n FROM T1_n125) tmp1 

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out b/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
index 4e9038d..aaa4ecc 100644
--- a/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
+++ b/ql/src/test/results/clientpositive/llap/constprog_dpp.q.out
@@ -53,35 +53,35 @@ Stage-0
       Reducer 2 llap
       File Output Operator [FS_17]
         Merge Join Operator [MERGEJOIN_21] (rows=1 width=4)
-          Conds:RS_23._col0=RS_26._col0(Left Outer),Output:["_col0"]
+          Conds:RS_33._col0=RS_36._col0(Left Outer),Output:["_col0"]
         <-Map 1 [SIMPLE_EDGE] vectorized, llap
-          SHUFFLE [RS_23]
+          SHUFFLE [RS_33]
             PartitionCols:_col0
-            Select Operator [SEL_22] (rows=1 width=4)
+            Select Operator [SEL_32] (rows=1 width=4)
               Output:["_col0"]
               TableScan [TS_0] (rows=1 width=4)
                 default@tb1,a,Tbl:COMPLETE,Col:NONE,Output:["id"]
         <-Reducer 5 [SIMPLE_EDGE] vectorized, llap
-          SHUFFLE [RS_26]
+          SHUFFLE [RS_36]
             PartitionCols:_col0
-            Limit [LIM_25] (rows=1 width=2)
+            Limit [LIM_35] (rows=1 width=2)
               Number of rows:1
-              Select Operator [SEL_24] (rows=1 width=2)
+              Select Operator [SEL_34] (rows=1 width=2)
                 Output:["_col0"]
               <-Union 4 [CUSTOM_SIMPLE_EDGE]
                 <-Map 3 [CONTAINS] vectorized, llap
-                  Reduce Output Operator [RS_29]
-                    Limit [LIM_28] (rows=1 width=2)
+                  Reduce Output Operator [RS_39]
+                    Limit [LIM_38] (rows=1 width=2)
                       Number of rows:1
-                      Select Operator [SEL_27] (rows=1 width=4)
+                      Select Operator [SEL_37] (rows=1 width=4)
                         Output:["_col0"]
-                        TableScan [TS_2] (rows=1 width=4)
+                        TableScan [TS_22] (rows=1 width=4)
                           Output:["id"]
                 <-Map 6 [CONTAINS] vectorized, llap
-                  Reduce Output Operator [RS_32]
-                    Limit [LIM_31] (rows=1 width=2)
+                  Reduce Output Operator [RS_42]
+                    Limit [LIM_41] (rows=1 width=2)
                       Number of rows:1
-                      Select Operator [SEL_30] (rows=1 width=0)
+                      Select Operator [SEL_40] (rows=1 width=0)
                         Output:["_col0"]
-                        TableScan [TS_4] (rows=1 width=0)
+                        TableScan [TS_27] (rows=1 width=0)
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/test/results/clientpositive/llap/dp_counter_mm.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dp_counter_mm.q.out b/ql/src/test/results/clientpositive/llap/dp_counter_mm.q.out
index 1be25fe..8f79e6e 100644
--- a/ql/src/test/results/clientpositive/llap/dp_counter_mm.q.out
+++ b/ql/src/test/results/clientpositive/llap/dp_counter_mm.q.out
@@ -24,16 +24,16 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2_n5: 84
    RECORDS_OUT_INTERMEDIATE_Map_1: 57
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
-   RECORDS_OUT_OPERATOR_FIL_5: 84
+   RECORDS_OUT_OPERATOR_FIL_12: 84
+   RECORDS_OUT_OPERATOR_FS_11: 57
    RECORDS_OUT_OPERATOR_FS_4: 84
-   RECORDS_OUT_OPERATOR_FS_6: 57
-   RECORDS_OUT_OPERATOR_GBY_2: 57
-   RECORDS_OUT_OPERATOR_GBY_4: 57
+   RECORDS_OUT_OPERATOR_GBY_7: 57
+   RECORDS_OUT_OPERATOR_GBY_9: 57
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 57
-   RECORDS_OUT_OPERATOR_SEL_1: 84
+   RECORDS_OUT_OPERATOR_RS_8: 57
+   RECORDS_OUT_OPERATOR_SEL_10: 57
    RECORDS_OUT_OPERATOR_SEL_2: 84
-   RECORDS_OUT_OPERATOR_SEL_5: 57
+   RECORDS_OUT_OPERATOR_SEL_6: 84
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -54,16 +54,16 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2_n5: 189
    RECORDS_OUT_INTERMEDIATE_Map_1: 121
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
-   RECORDS_OUT_OPERATOR_FIL_5: 189
+   RECORDS_OUT_OPERATOR_FIL_12: 189
+   RECORDS_OUT_OPERATOR_FS_11: 121
    RECORDS_OUT_OPERATOR_FS_4: 189
-   RECORDS_OUT_OPERATOR_FS_6: 121
-   RECORDS_OUT_OPERATOR_GBY_2: 121
-   RECORDS_OUT_OPERATOR_GBY_4: 121
+   RECORDS_OUT_OPERATOR_GBY_7: 121
+   RECORDS_OUT_OPERATOR_GBY_9: 121
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 121
-   RECORDS_OUT_OPERATOR_SEL_1: 189
+   RECORDS_OUT_OPERATOR_RS_8: 121
+   RECORDS_OUT_OPERATOR_SEL_10: 121
    RECORDS_OUT_OPERATOR_SEL_2: 189
-   RECORDS_OUT_OPERATOR_SEL_5: 121
+   RECORDS_OUT_OPERATOR_SEL_6: 189
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -92,16 +92,16 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2_n5: 189
    RECORDS_OUT_INTERMEDIATE_Map_1: 121
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
-   RECORDS_OUT_OPERATOR_FIL_5: 189
+   RECORDS_OUT_OPERATOR_FIL_12: 189
+   RECORDS_OUT_OPERATOR_FS_11: 121
    RECORDS_OUT_OPERATOR_FS_4: 189
-   RECORDS_OUT_OPERATOR_FS_6: 121
-   RECORDS_OUT_OPERATOR_GBY_2: 121
-   RECORDS_OUT_OPERATOR_GBY_4: 121
+   RECORDS_OUT_OPERATOR_GBY_7: 121
+   RECORDS_OUT_OPERATOR_GBY_9: 121
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 121
-   RECORDS_OUT_OPERATOR_SEL_1: 189
+   RECORDS_OUT_OPERATOR_RS_8: 121
+   RECORDS_OUT_OPERATOR_SEL_10: 121
    RECORDS_OUT_OPERATOR_SEL_2: 189
-   RECORDS_OUT_OPERATOR_SEL_5: 121
+   RECORDS_OUT_OPERATOR_SEL_6: 189
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -122,16 +122,16 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2_n5: 292
    RECORDS_OUT_INTERMEDIATE_Map_1: 184
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
-   RECORDS_OUT_OPERATOR_FIL_5: 292
+   RECORDS_OUT_OPERATOR_FIL_12: 292
+   RECORDS_OUT_OPERATOR_FS_11: 184
    RECORDS_OUT_OPERATOR_FS_4: 292
-   RECORDS_OUT_OPERATOR_FS_6: 184
-   RECORDS_OUT_OPERATOR_GBY_2: 184
-   RECORDS_OUT_OPERATOR_GBY_4: 184
+   RECORDS_OUT_OPERATOR_GBY_7: 184
+   RECORDS_OUT_OPERATOR_GBY_9: 184
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 184
-   RECORDS_OUT_OPERATOR_SEL_1: 292
+   RECORDS_OUT_OPERATOR_RS_8: 184
+   RECORDS_OUT_OPERATOR_SEL_10: 184
    RECORDS_OUT_OPERATOR_SEL_2: 292
-   RECORDS_OUT_OPERATOR_SEL_5: 184
+   RECORDS_OUT_OPERATOR_SEL_6: 292
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -171,19 +171,25 @@ Stage-2 HIVE COUNTERS:
    RECORDS_OUT_INTERMEDIATE_Map_1: 121
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
    RECORDS_OUT_INTERMEDIATE_Reducer_3: 0
-   RECORDS_OUT_OPERATOR_FIL_10: 105
-   RECORDS_OUT_OPERATOR_FIL_9: 84
+   RECORDS_OUT_OPERATOR_FIL_23: 84
+   RECORDS_OUT_OPERATOR_FIL_24: 105
+   RECORDS_OUT_OPERATOR_FS_11: 57
+   RECORDS_OUT_OPERATOR_FS_15: 105
+   RECORDS_OUT_OPERATOR_FS_22: 64
    RECORDS_OUT_OPERATOR_FS_4: 84
-   RECORDS_OUT_OPERATOR_FS_6: 121
-   RECORDS_OUT_OPERATOR_FS_8: 105
-   RECORDS_OUT_OPERATOR_GBY_2: 121
-   RECORDS_OUT_OPERATOR_GBY_4: 121
+   RECORDS_OUT_OPERATOR_GBY_18: 64
+   RECORDS_OUT_OPERATOR_GBY_20: 64
+   RECORDS_OUT_OPERATOR_GBY_7: 57
+   RECORDS_OUT_OPERATOR_GBY_9: 57
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 121
-   RECORDS_OUT_OPERATOR_SEL_1: 189
+   RECORDS_OUT_OPERATOR_RS_19: 64
+   RECORDS_OUT_OPERATOR_RS_8: 57
+   RECORDS_OUT_OPERATOR_SEL_10: 57
+   RECORDS_OUT_OPERATOR_SEL_13: 105
+   RECORDS_OUT_OPERATOR_SEL_17: 105
    RECORDS_OUT_OPERATOR_SEL_2: 84
-   RECORDS_OUT_OPERATOR_SEL_5: 121
-   RECORDS_OUT_OPERATOR_SEL_6: 105
+   RECORDS_OUT_OPERATOR_SEL_21: 64
+   RECORDS_OUT_OPERATOR_SEL_6: 84
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-2 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -209,19 +215,25 @@ Stage-2 HIVE COUNTERS:
    RECORDS_OUT_INTERMEDIATE_Map_1: 184
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
    RECORDS_OUT_INTERMEDIATE_Reducer_3: 0
-   RECORDS_OUT_OPERATOR_FIL_10: 208
-   RECORDS_OUT_OPERATOR_FIL_9: 84
+   RECORDS_OUT_OPERATOR_FIL_23: 84
+   RECORDS_OUT_OPERATOR_FIL_24: 208
+   RECORDS_OUT_OPERATOR_FS_11: 57
+   RECORDS_OUT_OPERATOR_FS_15: 208
+   RECORDS_OUT_OPERATOR_FS_22: 127
    RECORDS_OUT_OPERATOR_FS_4: 84
-   RECORDS_OUT_OPERATOR_FS_6: 184
-   RECORDS_OUT_OPERATOR_FS_8: 208
-   RECORDS_OUT_OPERATOR_GBY_2: 184
-   RECORDS_OUT_OPERATOR_GBY_4: 184
+   RECORDS_OUT_OPERATOR_GBY_18: 127
+   RECORDS_OUT_OPERATOR_GBY_20: 127
+   RECORDS_OUT_OPERATOR_GBY_7: 57
+   RECORDS_OUT_OPERATOR_GBY_9: 57
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 184
-   RECORDS_OUT_OPERATOR_SEL_1: 292
+   RECORDS_OUT_OPERATOR_RS_19: 127
+   RECORDS_OUT_OPERATOR_RS_8: 57
+   RECORDS_OUT_OPERATOR_SEL_10: 57
+   RECORDS_OUT_OPERATOR_SEL_13: 208
+   RECORDS_OUT_OPERATOR_SEL_17: 208
    RECORDS_OUT_OPERATOR_SEL_2: 84
-   RECORDS_OUT_OPERATOR_SEL_5: 184
-   RECORDS_OUT_OPERATOR_SEL_6: 208
+   RECORDS_OUT_OPERATOR_SEL_21: 127
+   RECORDS_OUT_OPERATOR_SEL_6: 84
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-2 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -256,20 +268,26 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_INTERMEDIATE_Map_1: 57
    RECORDS_OUT_INTERMEDIATE_Map_4: 64
    RECORDS_OUT_INTERMEDIATE_Reducer_3: 0
-   RECORDS_OUT_OPERATOR_FIL_10: 84
-   RECORDS_OUT_OPERATOR_FIL_11: 105
-   RECORDS_OUT_OPERATOR_FS_6: 121
-   RECORDS_OUT_OPERATOR_FS_9: 189
-   RECORDS_OUT_OPERATOR_GBY_2: 121
-   RECORDS_OUT_OPERATOR_GBY_4: 121
+   RECORDS_OUT_OPERATOR_FIL_20: 84
+   RECORDS_OUT_OPERATOR_FIL_29: 105
+   RECORDS_OUT_OPERATOR_FS_16: 121
+   RECORDS_OUT_OPERATOR_FS_24: 84
+   RECORDS_OUT_OPERATOR_FS_33: 105
+   RECORDS_OUT_OPERATOR_GBY_14: 121
+   RECORDS_OUT_OPERATOR_GBY_26: 57
+   RECORDS_OUT_OPERATOR_GBY_35: 64
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 121
-   RECORDS_OUT_OPERATOR_SEL_1: 189
-   RECORDS_OUT_OPERATOR_SEL_2: 84
-   RECORDS_OUT_OPERATOR_SEL_5: 226
-   RECORDS_OUT_OPERATOR_SEL_7: 189
-   RECORDS_OUT_OPERATOR_TS_0: 500
-   RECORDS_OUT_OPERATOR_TS_3: 500
+   RECORDS_OUT_OPERATOR_RS_27: 57
+   RECORDS_OUT_OPERATOR_RS_36: 64
+   RECORDS_OUT_OPERATOR_SEL_15: 121
+   RECORDS_OUT_OPERATOR_SEL_21: 84
+   RECORDS_OUT_OPERATOR_SEL_23: 84
+   RECORDS_OUT_OPERATOR_SEL_25: 84
+   RECORDS_OUT_OPERATOR_SEL_30: 105
+   RECORDS_OUT_OPERATOR_SEL_32: 105
+   RECORDS_OUT_OPERATOR_SEL_34: 105
+   RECORDS_OUT_OPERATOR_TS_19: 500
+   RECORDS_OUT_OPERATOR_TS_28: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
    GROUPED_INPUT_SPLITS_Map_4: 1
@@ -299,20 +317,26 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_INTERMEDIATE_Map_1: 57
    RECORDS_OUT_INTERMEDIATE_Map_4: 127
    RECORDS_OUT_INTERMEDIATE_Reducer_3: 0
-   RECORDS_OUT_OPERATOR_FIL_10: 84
-   RECORDS_OUT_OPERATOR_FIL_11: 208
-   RECORDS_OUT_OPERATOR_FS_6: 184
-   RECORDS_OUT_OPERATOR_FS_9: 292
-   RECORDS_OUT_OPERATOR_GBY_2: 184
-   RECORDS_OUT_OPERATOR_GBY_4: 184
+   RECORDS_OUT_OPERATOR_FIL_20: 84
+   RECORDS_OUT_OPERATOR_FIL_29: 208
+   RECORDS_OUT_OPERATOR_FS_16: 184
+   RECORDS_OUT_OPERATOR_FS_24: 84
+   RECORDS_OUT_OPERATOR_FS_33: 208
+   RECORDS_OUT_OPERATOR_GBY_14: 184
+   RECORDS_OUT_OPERATOR_GBY_26: 57
+   RECORDS_OUT_OPERATOR_GBY_35: 127
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 184
-   RECORDS_OUT_OPERATOR_SEL_1: 292
-   RECORDS_OUT_OPERATOR_SEL_2: 84
-   RECORDS_OUT_OPERATOR_SEL_5: 392
-   RECORDS_OUT_OPERATOR_SEL_7: 292
-   RECORDS_OUT_OPERATOR_TS_0: 500
-   RECORDS_OUT_OPERATOR_TS_3: 500
+   RECORDS_OUT_OPERATOR_RS_27: 57
+   RECORDS_OUT_OPERATOR_RS_36: 127
+   RECORDS_OUT_OPERATOR_SEL_15: 184
+   RECORDS_OUT_OPERATOR_SEL_21: 84
+   RECORDS_OUT_OPERATOR_SEL_23: 84
+   RECORDS_OUT_OPERATOR_SEL_25: 84
+   RECORDS_OUT_OPERATOR_SEL_30: 208
+   RECORDS_OUT_OPERATOR_SEL_32: 208
+   RECORDS_OUT_OPERATOR_SEL_34: 208
+   RECORDS_OUT_OPERATOR_TS_19: 500
+   RECORDS_OUT_OPERATOR_TS_28: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
    GROUPED_INPUT_SPLITS_Map_4: 1

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/test/results/clientpositive/llap/dp_counter_non_mm.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dp_counter_non_mm.q.out b/ql/src/test/results/clientpositive/llap/dp_counter_non_mm.q.out
index c0d2141..5b1769a 100644
--- a/ql/src/test/results/clientpositive/llap/dp_counter_non_mm.q.out
+++ b/ql/src/test/results/clientpositive/llap/dp_counter_non_mm.q.out
@@ -24,16 +24,16 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2_n3: 84
    RECORDS_OUT_INTERMEDIATE_Map_1: 57
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
-   RECORDS_OUT_OPERATOR_FIL_5: 84
+   RECORDS_OUT_OPERATOR_FIL_12: 84
+   RECORDS_OUT_OPERATOR_FS_11: 57
    RECORDS_OUT_OPERATOR_FS_4: 84
-   RECORDS_OUT_OPERATOR_FS_6: 57
-   RECORDS_OUT_OPERATOR_GBY_2: 57
-   RECORDS_OUT_OPERATOR_GBY_4: 57
+   RECORDS_OUT_OPERATOR_GBY_7: 57
+   RECORDS_OUT_OPERATOR_GBY_9: 57
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 57
-   RECORDS_OUT_OPERATOR_SEL_1: 84
+   RECORDS_OUT_OPERATOR_RS_8: 57
+   RECORDS_OUT_OPERATOR_SEL_10: 57
    RECORDS_OUT_OPERATOR_SEL_2: 84
-   RECORDS_OUT_OPERATOR_SEL_5: 57
+   RECORDS_OUT_OPERATOR_SEL_6: 84
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -54,16 +54,16 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2_n3: 189
    RECORDS_OUT_INTERMEDIATE_Map_1: 121
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
-   RECORDS_OUT_OPERATOR_FIL_5: 189
+   RECORDS_OUT_OPERATOR_FIL_12: 189
+   RECORDS_OUT_OPERATOR_FS_11: 121
    RECORDS_OUT_OPERATOR_FS_4: 189
-   RECORDS_OUT_OPERATOR_FS_6: 121
-   RECORDS_OUT_OPERATOR_GBY_2: 121
-   RECORDS_OUT_OPERATOR_GBY_4: 121
+   RECORDS_OUT_OPERATOR_GBY_7: 121
+   RECORDS_OUT_OPERATOR_GBY_9: 121
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 121
-   RECORDS_OUT_OPERATOR_SEL_1: 189
+   RECORDS_OUT_OPERATOR_RS_8: 121
+   RECORDS_OUT_OPERATOR_SEL_10: 121
    RECORDS_OUT_OPERATOR_SEL_2: 189
-   RECORDS_OUT_OPERATOR_SEL_5: 121
+   RECORDS_OUT_OPERATOR_SEL_6: 189
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -92,16 +92,16 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2_n3: 189
    RECORDS_OUT_INTERMEDIATE_Map_1: 121
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
-   RECORDS_OUT_OPERATOR_FIL_5: 189
+   RECORDS_OUT_OPERATOR_FIL_12: 189
+   RECORDS_OUT_OPERATOR_FS_11: 121
    RECORDS_OUT_OPERATOR_FS_4: 189
-   RECORDS_OUT_OPERATOR_FS_6: 121
-   RECORDS_OUT_OPERATOR_GBY_2: 121
-   RECORDS_OUT_OPERATOR_GBY_4: 121
+   RECORDS_OUT_OPERATOR_GBY_7: 121
+   RECORDS_OUT_OPERATOR_GBY_9: 121
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 121
-   RECORDS_OUT_OPERATOR_SEL_1: 189
+   RECORDS_OUT_OPERATOR_RS_8: 121
+   RECORDS_OUT_OPERATOR_SEL_10: 121
    RECORDS_OUT_OPERATOR_SEL_2: 189
-   RECORDS_OUT_OPERATOR_SEL_5: 121
+   RECORDS_OUT_OPERATOR_SEL_6: 189
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -122,16 +122,16 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_1_default.src2_n3: 292
    RECORDS_OUT_INTERMEDIATE_Map_1: 184
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
-   RECORDS_OUT_OPERATOR_FIL_5: 292
+   RECORDS_OUT_OPERATOR_FIL_12: 292
+   RECORDS_OUT_OPERATOR_FS_11: 184
    RECORDS_OUT_OPERATOR_FS_4: 292
-   RECORDS_OUT_OPERATOR_FS_6: 184
-   RECORDS_OUT_OPERATOR_GBY_2: 184
-   RECORDS_OUT_OPERATOR_GBY_4: 184
+   RECORDS_OUT_OPERATOR_GBY_7: 184
+   RECORDS_OUT_OPERATOR_GBY_9: 184
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 184
-   RECORDS_OUT_OPERATOR_SEL_1: 292
+   RECORDS_OUT_OPERATOR_RS_8: 184
+   RECORDS_OUT_OPERATOR_SEL_10: 184
    RECORDS_OUT_OPERATOR_SEL_2: 292
-   RECORDS_OUT_OPERATOR_SEL_5: 184
+   RECORDS_OUT_OPERATOR_SEL_6: 292
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -171,19 +171,25 @@ Stage-2 HIVE COUNTERS:
    RECORDS_OUT_INTERMEDIATE_Map_1: 121
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
    RECORDS_OUT_INTERMEDIATE_Reducer_3: 0
-   RECORDS_OUT_OPERATOR_FIL_10: 105
-   RECORDS_OUT_OPERATOR_FIL_9: 84
+   RECORDS_OUT_OPERATOR_FIL_23: 84
+   RECORDS_OUT_OPERATOR_FIL_24: 105
+   RECORDS_OUT_OPERATOR_FS_11: 57
+   RECORDS_OUT_OPERATOR_FS_15: 105
+   RECORDS_OUT_OPERATOR_FS_22: 64
    RECORDS_OUT_OPERATOR_FS_4: 84
-   RECORDS_OUT_OPERATOR_FS_6: 121
-   RECORDS_OUT_OPERATOR_FS_8: 105
-   RECORDS_OUT_OPERATOR_GBY_2: 121
-   RECORDS_OUT_OPERATOR_GBY_4: 121
+   RECORDS_OUT_OPERATOR_GBY_18: 64
+   RECORDS_OUT_OPERATOR_GBY_20: 64
+   RECORDS_OUT_OPERATOR_GBY_7: 57
+   RECORDS_OUT_OPERATOR_GBY_9: 57
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 121
-   RECORDS_OUT_OPERATOR_SEL_1: 189
+   RECORDS_OUT_OPERATOR_RS_19: 64
+   RECORDS_OUT_OPERATOR_RS_8: 57
+   RECORDS_OUT_OPERATOR_SEL_10: 57
+   RECORDS_OUT_OPERATOR_SEL_13: 105
+   RECORDS_OUT_OPERATOR_SEL_17: 105
    RECORDS_OUT_OPERATOR_SEL_2: 84
-   RECORDS_OUT_OPERATOR_SEL_5: 121
-   RECORDS_OUT_OPERATOR_SEL_6: 105
+   RECORDS_OUT_OPERATOR_SEL_21: 64
+   RECORDS_OUT_OPERATOR_SEL_6: 84
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-2 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -209,19 +215,25 @@ Stage-2 HIVE COUNTERS:
    RECORDS_OUT_INTERMEDIATE_Map_1: 184
    RECORDS_OUT_INTERMEDIATE_Reducer_2: 0
    RECORDS_OUT_INTERMEDIATE_Reducer_3: 0
-   RECORDS_OUT_OPERATOR_FIL_10: 208
-   RECORDS_OUT_OPERATOR_FIL_9: 84
+   RECORDS_OUT_OPERATOR_FIL_23: 84
+   RECORDS_OUT_OPERATOR_FIL_24: 208
+   RECORDS_OUT_OPERATOR_FS_11: 57
+   RECORDS_OUT_OPERATOR_FS_15: 208
+   RECORDS_OUT_OPERATOR_FS_22: 127
    RECORDS_OUT_OPERATOR_FS_4: 84
-   RECORDS_OUT_OPERATOR_FS_6: 184
-   RECORDS_OUT_OPERATOR_FS_8: 208
-   RECORDS_OUT_OPERATOR_GBY_2: 184
-   RECORDS_OUT_OPERATOR_GBY_4: 184
+   RECORDS_OUT_OPERATOR_GBY_18: 127
+   RECORDS_OUT_OPERATOR_GBY_20: 127
+   RECORDS_OUT_OPERATOR_GBY_7: 57
+   RECORDS_OUT_OPERATOR_GBY_9: 57
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 184
-   RECORDS_OUT_OPERATOR_SEL_1: 292
+   RECORDS_OUT_OPERATOR_RS_19: 127
+   RECORDS_OUT_OPERATOR_RS_8: 57
+   RECORDS_OUT_OPERATOR_SEL_10: 57
+   RECORDS_OUT_OPERATOR_SEL_13: 208
+   RECORDS_OUT_OPERATOR_SEL_17: 208
    RECORDS_OUT_OPERATOR_SEL_2: 84
-   RECORDS_OUT_OPERATOR_SEL_5: 184
-   RECORDS_OUT_OPERATOR_SEL_6: 208
+   RECORDS_OUT_OPERATOR_SEL_21: 127
+   RECORDS_OUT_OPERATOR_SEL_6: 84
    RECORDS_OUT_OPERATOR_TS_0: 500
 Stage-2 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
@@ -256,20 +268,26 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_INTERMEDIATE_Map_1: 57
    RECORDS_OUT_INTERMEDIATE_Map_4: 64
    RECORDS_OUT_INTERMEDIATE_Reducer_3: 0
-   RECORDS_OUT_OPERATOR_FIL_10: 84
-   RECORDS_OUT_OPERATOR_FIL_11: 105
-   RECORDS_OUT_OPERATOR_FS_6: 121
-   RECORDS_OUT_OPERATOR_FS_9: 189
-   RECORDS_OUT_OPERATOR_GBY_2: 121
-   RECORDS_OUT_OPERATOR_GBY_4: 121
+   RECORDS_OUT_OPERATOR_FIL_20: 84
+   RECORDS_OUT_OPERATOR_FIL_29: 105
+   RECORDS_OUT_OPERATOR_FS_16: 121
+   RECORDS_OUT_OPERATOR_FS_24: 84
+   RECORDS_OUT_OPERATOR_FS_33: 105
+   RECORDS_OUT_OPERATOR_GBY_14: 121
+   RECORDS_OUT_OPERATOR_GBY_26: 57
+   RECORDS_OUT_OPERATOR_GBY_35: 64
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 121
-   RECORDS_OUT_OPERATOR_SEL_1: 189
-   RECORDS_OUT_OPERATOR_SEL_2: 84
-   RECORDS_OUT_OPERATOR_SEL_5: 226
-   RECORDS_OUT_OPERATOR_SEL_7: 189
-   RECORDS_OUT_OPERATOR_TS_0: 500
-   RECORDS_OUT_OPERATOR_TS_3: 500
+   RECORDS_OUT_OPERATOR_RS_27: 57
+   RECORDS_OUT_OPERATOR_RS_36: 64
+   RECORDS_OUT_OPERATOR_SEL_15: 121
+   RECORDS_OUT_OPERATOR_SEL_21: 84
+   RECORDS_OUT_OPERATOR_SEL_23: 84
+   RECORDS_OUT_OPERATOR_SEL_25: 84
+   RECORDS_OUT_OPERATOR_SEL_30: 105
+   RECORDS_OUT_OPERATOR_SEL_32: 105
+   RECORDS_OUT_OPERATOR_SEL_34: 105
+   RECORDS_OUT_OPERATOR_TS_19: 500
+   RECORDS_OUT_OPERATOR_TS_28: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
    GROUPED_INPUT_SPLITS_Map_4: 1
@@ -299,20 +317,26 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_INTERMEDIATE_Map_1: 57
    RECORDS_OUT_INTERMEDIATE_Map_4: 127
    RECORDS_OUT_INTERMEDIATE_Reducer_3: 0
-   RECORDS_OUT_OPERATOR_FIL_10: 84
-   RECORDS_OUT_OPERATOR_FIL_11: 208
-   RECORDS_OUT_OPERATOR_FS_6: 184
-   RECORDS_OUT_OPERATOR_FS_9: 292
-   RECORDS_OUT_OPERATOR_GBY_2: 184
-   RECORDS_OUT_OPERATOR_GBY_4: 184
+   RECORDS_OUT_OPERATOR_FIL_20: 84
+   RECORDS_OUT_OPERATOR_FIL_29: 208
+   RECORDS_OUT_OPERATOR_FS_16: 184
+   RECORDS_OUT_OPERATOR_FS_24: 84
+   RECORDS_OUT_OPERATOR_FS_33: 208
+   RECORDS_OUT_OPERATOR_GBY_14: 184
+   RECORDS_OUT_OPERATOR_GBY_26: 57
+   RECORDS_OUT_OPERATOR_GBY_35: 127
    RECORDS_OUT_OPERATOR_MAP_0: 0
-   RECORDS_OUT_OPERATOR_RS_3: 184
-   RECORDS_OUT_OPERATOR_SEL_1: 292
-   RECORDS_OUT_OPERATOR_SEL_2: 84
-   RECORDS_OUT_OPERATOR_SEL_5: 392
-   RECORDS_OUT_OPERATOR_SEL_7: 292
-   RECORDS_OUT_OPERATOR_TS_0: 500
-   RECORDS_OUT_OPERATOR_TS_3: 500
+   RECORDS_OUT_OPERATOR_RS_27: 57
+   RECORDS_OUT_OPERATOR_RS_36: 127
+   RECORDS_OUT_OPERATOR_SEL_15: 184
+   RECORDS_OUT_OPERATOR_SEL_21: 84
+   RECORDS_OUT_OPERATOR_SEL_23: 84
+   RECORDS_OUT_OPERATOR_SEL_25: 84
+   RECORDS_OUT_OPERATOR_SEL_30: 208
+   RECORDS_OUT_OPERATOR_SEL_32: 208
+   RECORDS_OUT_OPERATOR_SEL_34: 208
+   RECORDS_OUT_OPERATOR_TS_19: 500
+   RECORDS_OUT_OPERATOR_TS_28: 500
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
    GROUPED_INPUT_SPLITS_Map_4: 1

http://git-wip-us.apache.org/repos/asf/hive/blob/e8f283cf/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out
index c3ef505..aa65613 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_user_level.q.out
@@ -1450,22 +1450,22 @@ Stage-0
           Conds:SEL_2._col1=Union 3._col0(Inner),Output:["_col0","_col1","_col2"]
         <-Union 3 [BROADCAST_EDGE]
           <-Map 2 [CONTAINS] llap
-            Reduce Output Operator [RS_12]
+            Reduce Output Operator [RS_39]
               PartitionCols:_col0
-              Select Operator [SEL_5] (rows=2000 width=87)
+              Select Operator [SEL_37] (rows=2000 width=87)
                 Output:["_col0"]
-                Filter Operator [FIL_20] (rows=2000 width=87)
+                Filter Operator [FIL_36] (rows=2000 width=87)
                   predicate:key is not null
-                  TableScan [TS_3] (rows=2000 width=87)
+                  TableScan [TS_35] (rows=2000 width=87)
                     Output:["key"]
           <-Map 4 [CONTAINS] llap
-            Reduce Output Operator [RS_12]
+            Reduce Output Operator [RS_44]
               PartitionCols:_col0
-              Select Operator [SEL_8] (rows=20 width=87)
+              Select Operator [SEL_42] (rows=20 width=87)
                 Output:["_col0"]
-                Filter Operator [FIL_21] (rows=20 width=87)
+                Filter Operator [FIL_41] (rows=20 width=87)
                   predicate:key1 is not null
-                  TableScan [TS_6] (rows=20 width=87)
+                  TableScan [TS_40] (rows=20 width=87)
                     Output:["key1"]
         <-Select Operator [SEL_2] (rows=9174 width=73)
             Output:["_col0","_col1"]
