HIVE-11037: HiveOnTez: make explain user level = true as default (Pengcheng 
Xiong via Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8842dcaf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8842dcaf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8842dcaf

Branch: refs/heads/beeline-cli
Commit: 8842dcaf0074fc3e7f9aad6cf9301da03ccd1621
Parents: d81c41c
Author: jpullokk <[email protected]>
Authored: Tue Jun 23 13:21:47 2015 -0700
Committer: jpullokk <[email protected]>
Committed: Tue Jun 23 13:21:47 2015 -0700

----------------------------------------------------------------------
 .../hive/common/jsonexplain/tez/Attr.java       |   6 +-
 .../hive/common/jsonexplain/tez/Connection.java |   6 +-
 .../hadoop/hive/common/jsonexplain/tez/Op.java  |  64 +--
 .../hive/common/jsonexplain/tez/Printer.java    |  41 ++
 .../hive/common/jsonexplain/tez/Stage.java      |  95 ++--
 .../common/jsonexplain/tez/TezJsonParser.java   |  61 +--
 .../hive/common/jsonexplain/tez/Vertex.java     |  75 +--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +-
 .../test/resources/testconfiguration.properties |   1 +
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |  48 +-
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |   9 +-
 .../hadoop/hive/ql/plan/ConditionalWork.java    |   4 +-
 ql/src/test/queries/clientpositive/auto_join0.q |   1 +
 ql/src/test/queries/clientpositive/auto_join1.q |   1 +
 .../test/queries/clientpositive/auto_join21.q   |   1 +
 .../test/queries/clientpositive/auto_join29.q   |   1 +
 .../test/queries/clientpositive/auto_join30.q   |   1 +
 .../clientpositive/auto_sortmerge_join_10.q     |   1 +
 .../clientpositive/auto_sortmerge_join_13.q     |   1 +
 .../clientpositive/auto_sortmerge_join_14.q     |   1 +
 .../clientpositive/auto_sortmerge_join_15.q     |   1 +
 .../clientpositive/auto_sortmerge_join_9.q      |   1 +
 ql/src/test/queries/clientpositive/bucket2.q    |   1 +
 ql/src/test/queries/clientpositive/bucket3.q    |   1 +
 ql/src/test/queries/clientpositive/bucket4.q    |   1 +
 .../clientpositive/bucket_map_join_tez1.q       |   1 +
 .../clientpositive/bucket_map_join_tez2.q       |   1 +
 .../clientpositive/correlationoptimizer1.q      |   1 +
 ql/src/test/queries/clientpositive/count.q      |   1 +
 ql/src/test/queries/clientpositive/cross_join.q |   1 +
 .../clientpositive/cross_product_check_1.q      |   1 +
 .../clientpositive/cross_product_check_2.q      |   1 +
 ql/src/test/queries/clientpositive/ctas.q       |   1 +
 .../disable_merge_for_bucketing.q               |   1 +
 .../clientpositive/dynamic_partition_pruning.q  |   1 +
 .../dynamic_partition_pruning_2.q               |   1 +
 .../dynpart_sort_opt_vectorization.q            |   1 +
 .../clientpositive/dynpart_sort_optimization.q  |   1 +
 .../clientpositive/dynpart_sort_optimization2.q |   1 +
 .../test/queries/clientpositive/explainuser_3.q | 115 ++++
 ql/src/test/queries/clientpositive/groupby1.q   |   1 +
 ql/src/test/queries/clientpositive/groupby2.q   |   1 +
 ql/src/test/queries/clientpositive/groupby3.q   |   1 +
 ql/src/test/queries/clientpositive/having.q     |   1 +
 .../clientpositive/hybridgrace_hashjoin_1.q     |   1 +
 .../clientpositive/hybridgrace_hashjoin_2.q     |   1 +
 .../test/queries/clientpositive/insert_into1.q  |   1 +
 .../test/queries/clientpositive/insert_into2.q  |   1 +
 ql/src/test/queries/clientpositive/join0.q      |   1 +
 ql/src/test/queries/clientpositive/join1.q      |   1 +
 .../test/queries/clientpositive/join_nullsafe.q |   1 +
 .../queries/clientpositive/limit_pushdown.q     |   1 +
 .../queries/clientpositive/load_dyn_part1.q     |   1 +
 .../queries/clientpositive/load_dyn_part2.q     |   1 +
 .../queries/clientpositive/load_dyn_part3.q     |   1 +
 .../test/queries/clientpositive/lvj_mapjoin.q   |   1 +
 .../queries/clientpositive/mapjoin_decimal.q    |   1 +
 .../queries/clientpositive/mapjoin_mapjoin.q    |   1 +
 ql/src/test/queries/clientpositive/mapreduce1.q |   1 +
 ql/src/test/queries/clientpositive/mapreduce2.q |   1 +
 ql/src/test/queries/clientpositive/merge1.q     |   1 +
 ql/src/test/queries/clientpositive/merge2.q     |   1 +
 ql/src/test/queries/clientpositive/mergejoin.q  |   1 +
 .../clientpositive/metadata_only_queries.q      |   1 +
 ql/src/test/queries/clientpositive/mrr.q        |   1 +
 ql/src/test/queries/clientpositive/orc_merge1.q |   1 +
 ql/src/test/queries/clientpositive/orc_merge2.q |   1 +
 ql/src/test/queries/clientpositive/orc_merge3.q |   1 +
 ql/src/test/queries/clientpositive/orc_merge4.q |   1 +
 ql/src/test/queries/clientpositive/orc_merge5.q |   1 +
 ql/src/test/queries/clientpositive/orc_merge6.q |   1 +
 ql/src/test/queries/clientpositive/orc_merge7.q |   1 +
 .../clientpositive/orc_merge_incompat1.q        |   1 +
 .../clientpositive/orc_merge_incompat2.q        |   1 +
 ql/src/test/queries/clientpositive/parallel.q   |   1 +
 ql/src/test/queries/clientpositive/ptf.q        |   1 +
 .../test/queries/clientpositive/ptf_matchpath.q |   1 +
 .../test/queries/clientpositive/ptf_streaming.q |   1 +
 .../test/queries/clientpositive/script_pipe.q   |   1 +
 .../queries/clientpositive/selectDistinctStar.q |   1 +
 .../clientpositive/select_dummy_source.q        |   1 +
 ql/src/test/queries/clientpositive/skewjoin.q   |   1 +
 .../queries/clientpositive/stats_noscan_1.q     |   1 +
 .../queries/clientpositive/stats_only_null.q    |   1 +
 .../queries/clientpositive/subquery_exists.q    |   1 +
 .../test/queries/clientpositive/subquery_in.q   |   1 +
 ql/src/test/queries/clientpositive/temp_table.q |   1 +
 .../clientpositive/tez_bmj_schema_evolution.q   |   1 +
 ql/src/test/queries/clientpositive/tez_dml.q    |   1 +
 ql/src/test/queries/clientpositive/tez_join.q   |   1 +
 .../test/queries/clientpositive/tez_join_hash.q |   1 +
 .../queries/clientpositive/tez_join_tests.q     |   1 +
 .../queries/clientpositive/tez_joins_explain.q  |   1 +
 .../test/queries/clientpositive/tez_self_join.q |   1 +
 ql/src/test/queries/clientpositive/tez_smb_1.q  |   1 +
 .../test/queries/clientpositive/tez_smb_main.q  |   1 +
 ql/src/test/queries/clientpositive/tez_union.q  |   1 +
 ql/src/test/queries/clientpositive/tez_union2.q |   1 +
 .../tez_union_dynamic_partition.q               |   1 +
 .../queries/clientpositive/tez_union_group_by.q |   1 +
 .../clientpositive/tez_union_multiinsert.q      |   1 +
 ql/src/test/queries/clientpositive/transform1.q |   1 +
 ql/src/test/queries/clientpositive/union2.q     |   1 +
 ql/src/test/queries/clientpositive/union3.q     |   1 +
 ql/src/test/queries/clientpositive/union4.q     |   1 +
 ql/src/test/queries/clientpositive/union5.q     |   1 +
 ql/src/test/queries/clientpositive/union6.q     |   1 +
 ql/src/test/queries/clientpositive/union7.q     |   1 +
 ql/src/test/queries/clientpositive/union8.q     |   1 +
 ql/src/test/queries/clientpositive/union9.q     |   1 +
 .../queries/clientpositive/unionDistinct_1.q    |   1 +
 .../queries/clientpositive/vector_aggregate_9.q |   1 +
 .../queries/clientpositive/vector_between_in.q  |   1 +
 .../clientpositive/vector_binary_join_groupby.q |   1 +
 .../test/queries/clientpositive/vector_bucket.q |   1 +
 .../clientpositive/vector_cast_constant.q       |   1 +
 .../test/queries/clientpositive/vector_char_2.q |   1 +
 .../test/queries/clientpositive/vector_char_4.q |   1 +
 .../clientpositive/vector_char_mapjoin1.q       |   1 +
 .../queries/clientpositive/vector_char_simple.q |   1 +
 .../queries/clientpositive/vector_coalesce.q    |   1 +
 .../queries/clientpositive/vector_coalesce_2.q  |   1 +
 .../clientpositive/vector_count_distinct.q      |   1 +
 .../queries/clientpositive/vector_data_types.q  |   1 +
 .../test/queries/clientpositive/vector_date_1.q |   1 +
 .../queries/clientpositive/vector_decimal_1.q   |   1 +
 .../clientpositive/vector_decimal_10_0.q        |   1 +
 .../queries/clientpositive/vector_decimal_2.q   |   1 +
 .../clientpositive/vector_decimal_aggregate.q   |   1 +
 .../clientpositive/vector_decimal_cast.q        |   1 +
 .../clientpositive/vector_decimal_expressions.q |   1 +
 .../clientpositive/vector_decimal_mapjoin.q     |   1 +
 .../clientpositive/vector_decimal_math_funcs.q  |   1 +
 .../clientpositive/vector_decimal_precision.q   |   1 +
 .../clientpositive/vector_decimal_round.q       |   1 +
 .../clientpositive/vector_decimal_round_2.q     |   1 +
 .../queries/clientpositive/vector_decimal_udf.q |   1 +
 .../clientpositive/vector_decimal_udf2.q        |   1 +
 .../queries/clientpositive/vector_distinct_2.q  |   1 +
 ql/src/test/queries/clientpositive/vector_elt.q |   1 +
 .../queries/clientpositive/vector_groupby_3.q   |   1 +
 .../clientpositive/vector_groupby_reduce.q      |   1 +
 .../clientpositive/vector_grouping_sets.q       |   1 +
 .../queries/clientpositive/vector_if_expr.q     |   1 +
 .../queries/clientpositive/vector_inner_join.q  |   1 +
 .../queries/clientpositive/vector_interval_1.q  |   1 +
 .../queries/clientpositive/vector_interval_2.q  |   1 +
 .../clientpositive/vector_interval_mapjoin.q    |   1 +
 .../test/queries/clientpositive/vector_join30.q |   1 +
 .../clientpositive/vector_left_outer_join.q     |   1 +
 .../clientpositive/vector_left_outer_join2.q    |   1 +
 .../clientpositive/vector_leftsemi_mapjoin.q    |   1 +
 .../clientpositive/vector_mapjoin_reduce.q      |   1 +
 .../vector_mr_diff_schema_alias.q               |   1 +
 .../clientpositive/vector_multi_insert.q        |   1 +
 .../vector_non_string_partition.q               |   1 +
 .../clientpositive/vector_null_projection.q     |   1 +
 .../clientpositive/vector_nullsafe_join.q       |   1 +
 .../queries/clientpositive/vector_orderby_5.q   |   1 +
 .../queries/clientpositive/vector_outer_join0.q |   1 +
 .../queries/clientpositive/vector_outer_join1.q |   1 +
 .../queries/clientpositive/vector_outer_join2.q |   1 +
 .../queries/clientpositive/vector_outer_join3.q |   1 +
 .../queries/clientpositive/vector_outer_join4.q |   1 +
 .../queries/clientpositive/vector_outer_join5.q |   1 +
 .../vector_partition_diff_num_cols.q            |   1 +
 .../vector_partitioned_date_time.q              |   1 +
 .../vector_reduce_groupby_decimal.q             |   1 +
 .../clientpositive/vector_string_concat.q       |   1 +
 .../queries/clientpositive/vector_varchar_4.q   |   1 +
 .../clientpositive/vector_varchar_mapjoin1.q    |   1 +
 .../clientpositive/vector_varchar_simple.q      |   1 +
 .../queries/clientpositive/vectorization_0.q    |   1 +
 .../queries/clientpositive/vectorization_13.q   |   1 +
 .../queries/clientpositive/vectorization_14.q   |   1 +
 .../queries/clientpositive/vectorization_15.q   |   1 +
 .../queries/clientpositive/vectorization_16.q   |   1 +
 .../queries/clientpositive/vectorization_7.q    |   1 +
 .../queries/clientpositive/vectorization_8.q    |   1 +
 .../queries/clientpositive/vectorization_9.q    |   1 +
 .../clientpositive/vectorization_decimal_date.q |   1 +
 .../queries/clientpositive/vectorization_div0.q |   1 +
 .../clientpositive/vectorization_limit.q        |   1 +
 .../clientpositive/vectorization_part_project.q |   1 +
 .../clientpositive/vectorization_pushdown.q     |   1 +
 .../vectorization_short_regress.q               |   1 +
 .../clientpositive/vectorized_bucketmapjoin1.q  |   1 +
 .../queries/clientpositive/vectorized_case.q    |   1 +
 .../queries/clientpositive/vectorized_casts.q   |   1 +
 .../queries/clientpositive/vectorized_context.q |   1 +
 .../clientpositive/vectorized_date_funcs.q      |   1 +
 .../clientpositive/vectorized_distinct_gby.q    |   1 +
 .../vectorized_dynamic_partition_pruning.q      |   1 +
 .../queries/clientpositive/vectorized_mapjoin.q |   1 +
 .../clientpositive/vectorized_math_funcs.q      |   1 +
 .../clientpositive/vectorized_nested_mapjoin.q  |   1 +
 .../queries/clientpositive/vectorized_parquet.q |   1 +
 .../clientpositive/vectorized_shufflejoin.q     |   1 +
 .../clientpositive/vectorized_string_funcs.q    |   1 +
 .../clientpositive/vectorized_timestamp_funcs.q |   1 +
 .../clientpositive/tez/explainuser_1.q.out      | 188 +++++--
 .../clientpositive/tez/explainuser_2.q.out      | 221 ++++----
 .../clientpositive/tez/explainuser_3.q.out      | 522 +++++++++++++++++++
 203 files changed, 1346 insertions(+), 299 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Attr.java
----------------------------------------------------------------------
diff --git 
a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Attr.java 
b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Attr.java
index e3b9eef..5d355d2 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Attr.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Attr.java
@@ -18,9 +18,9 @@
 
 package org.apache.hadoop.hive.common.jsonexplain.tez;
 
-public class Attr implements Comparable<Attr> {
-  String name;
-  String value;
+public final class Attr implements Comparable<Attr> {
+  public final String name;
+  public final String value;
 
   public Attr(String name, String value) {
     super();

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
----------------------------------------------------------------------
diff --git 
a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java 
b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
index c7a4ed6..d341cb1 100644
--- 
a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
+++ 
b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
@@ -18,9 +18,9 @@
 
 package org.apache.hadoop.hive.common.jsonexplain.tez;
 
-public class Connection {
-  public String type;
-  public Vertex from;
+public final class Connection {
+  public final String type;
+  public final Vertex from;
 
   public Connection(String type, Vertex from) {
     super();

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
----------------------------------------------------------------------
diff --git 
a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java 
b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
index fb12f70..9ecba7c 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.common.jsonexplain.tez;
 
-import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -28,22 +27,24 @@ import java.util.Map;
 import org.json.JSONException;
 import org.json.JSONObject;
 
-public class Op {
-  String name;
-  String operatorId;
-  Op parent;
-  List<Op> children;
-  List<Attr> attrs;
+public final class Op {
+  public final String name;
+  //tezJsonParser
+  public final TezJsonParser parser;
+  public final String operatorId;
+  public Op parent;
+  public final List<Op> children;
+  public final List<Attr> attrs;
   // the jsonObject for this operator
-  JSONObject opObject;
+  public final JSONObject opObject;
   // the vertex that this operator belongs to
-  Vertex vertex;
+  public final Vertex vertex;
   // the vertex that this operator output to if this operator is a
   // ReduceOutputOperator
-  String outputVertexName;
+  public final String outputVertexName;
 
   public Op(String name, String id, String outputVertexName, List<Op> 
children, List<Attr> attrs,
-      JSONObject opObject, Vertex vertex) throws JSONException {
+      JSONObject opObject, Vertex vertex, TezJsonParser tezJsonParser) throws 
JSONException {
     super();
     this.name = name;
     this.operatorId = id;
@@ -52,6 +53,7 @@ public class Op {
     this.attrs = attrs;
     this.opObject = opObject;
     this.vertex = vertex;
+    this.parser = tezJsonParser;
   }
 
   private void inlineJoinOp() throws Exception {
@@ -73,7 +75,7 @@ public class Op {
           }
         }
         if (c != null) {
-          TezJsonParser.addInline(this, c);
+          parser.addInline(this, c);
         }
       }
       // update the attrs
@@ -96,14 +98,12 @@ public class Op {
       }
     }
     // inline merge join operator in a self-join
-    else if (this.name.equals("Merge Join Operator")) {
+    else {
       if (this.vertex != null) {
         for (Vertex v : this.vertex.mergeJoinDummyVertexs) {
-          TezJsonParser.addInline(this, new Connection(null, v));
+          parser.addInline(this, new Connection(null, v));
         }
       }
-    } else {
-      throw new Exception("Unknown join operator");
     }
   }
 
@@ -123,23 +123,23 @@ public class Op {
    *          operator so that we can decide the corresponding indent.
    * @throws Exception
    */
-  public void print(PrintStream out, List<Boolean> indentFlag, boolean 
branchOfJoinOp)
+  public void print(Printer printer, List<Boolean> indentFlag, boolean 
branchOfJoinOp)
       throws Exception {
     // print name
-    if (TezJsonParser.printSet.contains(this)) {
-      out.println(TezJsonParser.prefixString(indentFlag) + " Please refer to 
the previous "
+    if (parser.printSet.contains(this)) {
+      printer.println(TezJsonParser.prefixString(indentFlag) + " Please refer 
to the previous "
           + this.getNameWithOpId());
       return;
     }
-    TezJsonParser.printSet.add(this);
+    parser.printSet.add(this);
     if (!branchOfJoinOp) {
-      out.println(TezJsonParser.prefixString(indentFlag) + 
this.getNameWithOpId());
+      printer.println(TezJsonParser.prefixString(indentFlag) + 
this.getNameWithOpId());
     } else {
-      out.println(TezJsonParser.prefixString(indentFlag, "|<-") + 
this.getNameWithOpId());
+      printer.println(TezJsonParser.prefixString(indentFlag, "|<-") + 
this.getNameWithOpId());
     }
     branchOfJoinOp = false;
-    // if this operator is a join operator
-    if (this.name.contains("Join")) {
+    // if this operator is a Map Join Operator or a Merge Join Operator
+    if (this.name.equals("Map Join Operator") || this.name.equals("Merge Join 
Operator")) {
       inlineJoinOp();
       branchOfJoinOp = true;
     }
@@ -149,7 +149,7 @@ public class Op {
     if (this.parent == null) {
       if (this.vertex != null) {
         for (Connection connection : this.vertex.parentConnections) {
-          if (!TezJsonParser.isInline(connection.from)) {
+          if (!parser.isInline(connection.from)) {
             noninlined.add(connection);
           }
         }
@@ -167,12 +167,12 @@ public class Op {
     }
     Collections.sort(attrs);
     for (Attr attr : attrs) {
-      out.println(TezJsonParser.prefixString(attFlag) + attr.toString());
+      printer.println(TezJsonParser.prefixString(attFlag) + attr.toString());
     }
     // print inline vertex
-    if (TezJsonParser.inlineMap.containsKey(this)) {
-      for (int index = 0; index < TezJsonParser.inlineMap.get(this).size(); 
index++) {
-        Connection connection = TezJsonParser.inlineMap.get(this).get(index);
+    if (parser.inlineMap.containsKey(this)) {
+      for (int index = 0; index < parser.inlineMap.get(this).size(); index++) {
+        Connection connection = parser.inlineMap.get(this).get(index);
         List<Boolean> vertexFlag = new ArrayList<>();
         vertexFlag.addAll(indentFlag);
         if (branchOfJoinOp) {
@@ -185,7 +185,7 @@ public class Op {
         else {
           vertexFlag.add(false);
         }
-        connection.from.print(out, vertexFlag, connection.type, this.vertex);
+        connection.from.print(printer, vertexFlag, connection.type, 
this.vertex);
       }
     }
     // print parent op, i.e., where data comes from
@@ -193,7 +193,7 @@ public class Op {
       List<Boolean> parentFlag = new ArrayList<>();
       parentFlag.addAll(indentFlag);
       parentFlag.add(false);
-      this.parent.print(out, parentFlag, branchOfJoinOp);
+      this.parent.print(printer, parentFlag, branchOfJoinOp);
     }
     // print next vertex
     else {
@@ -206,7 +206,7 @@ public class Op {
         } else {
           vertexFlag.add(false);
         }
-        v.print(out, vertexFlag, noninlined.get(index).type, this.vertex);
+        v.print(printer, vertexFlag, noninlined.get(index).type, this.vertex);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
----------------------------------------------------------------------
diff --git 
a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java 
b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
new file mode 100644
index 0000000..d3c91d6
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.jsonexplain.tez;
+
+public final class Printer {
+  public static final String lineSeparator = 
System.getProperty("line.separator");;
+  private final StringBuilder builder = new StringBuilder();
+
+  public void print(String string) {
+    builder.append(string);
+  }
+
+  public void println(String string) {
+    builder.append(string);
+    builder.append(lineSeparator);
+  }
+
+  public void println() {
+    builder.append(lineSeparator);
+  }
+  
+  public String toString() {
+    return builder.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
----------------------------------------------------------------------
diff --git 
a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java 
b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
index 10e0a0c..c5a78b5 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
@@ -19,13 +19,13 @@
 package org.apache.hadoop.hive.common.jsonexplain.tez;
 
 import java.io.IOException;
-import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 
 import org.apache.hadoop.fs.Path;
 import org.codehaus.jackson.JsonParseException;
@@ -34,30 +34,33 @@ import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;
 
-public class Stage {
-  String name;
+public final class Stage {
+  //external name is used to show at the console
+  String externalName;
+  //internal name is used to track the stages
+  public final String internalName;
+  //tezJsonParser
+  public final TezJsonParser parser;
   // upstream stages, e.g., root stage
-  List<Stage> parentStages;
+  public final List<Stage> parentStages = new ArrayList<>();
   // downstream stages.
-  List<Stage> childStages;
-  Map<String, Vertex> vertexs;
-  List<Attr> attrs;
-  LinkedHashMap<Vertex, List<Connection>> tezStageDependency;
+  public final List<Stage> childStages = new ArrayList<>();
+  public final Map<String, Vertex> vertexs =new LinkedHashMap<>();
+  public final List<Attr> attrs = new ArrayList<>();
+  Map<Vertex, List<Connection>> tezStageDependency;
   // some stage may contain only a single operator, e.g., create table 
operator,
   // fetch operator.
   Op op;
 
-  public Stage(String name) {
+  public Stage(String name, TezJsonParser tezJsonParser) {
     super();
-    this.name = name;
-    parentStages = new ArrayList<>();
-    childStages = new ArrayList<>();
-    attrs = new ArrayList<>();
-    vertexs = new LinkedHashMap<>();
+    internalName = name;
+    externalName = name;
+    parser = tezJsonParser;
   }
 
   public void addDependency(JSONObject object, Map<String, Stage> stages) 
throws JSONException {
-    if (!object.has("ROOT STAGE")) {
+    if (object.has("DEPENDENT STAGES")) {
       String names = object.getString("DEPENDENT STAGES");
       for (String name : names.split(",")) {
         Stage parent = stages.get(name.trim());
@@ -65,6 +68,16 @@ public class Stage {
         parent.childStages.add(this);
       }
     }
+    if (object.has("CONDITIONAL CHILD TASKS")) {
+      String names = object.getString("CONDITIONAL CHILD TASKS");
+      this.externalName = this.internalName + "(CONDITIONAL CHILD TASKS: " + 
names + ")";
+      for (String name : names.split(",")) {
+        Stage child = stages.get(name.trim());
+        child.externalName = child.internalName + "(CONDITIONAL)";
+        child.parentStages.add(this);
+        this.childStages.add(child);
+      }
+    }
   }
 
   /**
@@ -76,14 +89,14 @@ public class Stage {
    */
   public void extractVertex(JSONObject object) throws Exception {
     if (object.has("Tez")) {
-      this.tezStageDependency = new LinkedHashMap<>();
+      this.tezStageDependency = new TreeMap<>();
       JSONObject tez = (JSONObject) object.get("Tez");
       JSONObject vertices = tez.getJSONObject("Vertices:");
       if (tez.has("Edges:")) {
         JSONObject edges = tez.getJSONObject("Edges:");
         // iterate for the first time to get all the vertices
         for (String to : JSONObject.getNames(edges)) {
-          vertexs.put(to, new Vertex(to, vertices.getJSONObject(to)));
+          vertexs.put(to, new Vertex(to, vertices.getJSONObject(to), parser));
         }
         // iterate for the second time to get all the vertex dependency
         for (String to : JSONObject.getNames(edges)) {
@@ -95,7 +108,7 @@ public class Stage {
             String parent = obj.getString("parent");
             Vertex parentVertex = vertexs.get(parent);
             if (parentVertex == null) {
-              parentVertex = new Vertex(parent, 
vertices.getJSONObject(parent));
+              parentVertex = new Vertex(parent, 
vertices.getJSONObject(parent), parser);
               vertexs.put(parent, parentVertex);
             }
             String type = obj.getString("type");
@@ -117,7 +130,7 @@ public class Stage {
               String parent = obj.getString("parent");
               Vertex parentVertex = vertexs.get(parent);
               if (parentVertex == null) {
-                parentVertex = new Vertex(parent, 
vertices.getJSONObject(parent));
+                parentVertex = new Vertex(parent, 
vertices.getJSONObject(parent), parser);
                 vertexs.put(parent, parentVertex);
               }
               String type = obj.getString("type");
@@ -135,7 +148,7 @@ public class Stage {
         }
       } else {
         for (String vertexName : JSONObject.getNames(vertices)) {
-          vertexs.put(vertexName, new Vertex(vertexName, 
vertices.getJSONObject(vertexName)));
+          vertexs.put(vertexName, new Vertex(vertexName, 
vertices.getJSONObject(vertexName), parser));
         }
       }
       // The opTree in vertex is extracted
@@ -147,11 +160,13 @@ public class Stage {
       }
     } else {
       String[] names = JSONObject.getNames(object);
-      for (String name : names) {
-        if (name.contains("Operator")) {
-          this.op = extractOp(name, object.getJSONObject(name));
-        } else {
-          attrs.add(new Attr(name, object.get(name).toString()));
+      if (names != null) {
+        for (String name : names) {
+          if (name.contains("Operator")) {
+            this.op = extractOp(name, object.getJSONObject(name));
+          } else {
+            attrs.add(new Attr(name, object.get(name).toString()));
+          }
         }
       }
     }
@@ -185,7 +200,7 @@ public class Stage {
             if (name.equals("Processor Tree:")) {
               JSONObject object = new JSONObject();
               object.put(name, attrObj);
-              v = new Vertex(null, object);
+              v = new Vertex(null, object, parser);
               v.extractOpTree();
             } else {
               for (String attrName : JSONObject.getNames(attrObj)) {
@@ -194,13 +209,13 @@ public class Stage {
             }
           }
         } else {
-          throw new Exception("Unsupported object in " + this.name);
+          throw new Exception("Unsupported object in " + this.internalName);
         }
       }
     }
-    Op op = new Op(opName, null, null, null, attrs, null, v);
+    Op op = new Op(opName, null, null, null, attrs, null, v, parser);
     if (v != null) {
-      TezJsonParser.addInline(op, new Connection(null, v));
+      parser.addInline(op, new Connection(null, v));
     }
     return op;
   }
@@ -217,37 +232,37 @@ public class Stage {
     return false;
   }
 
-  public void print(PrintStream out, List<Boolean> indentFlag) throws 
JSONException, Exception {
+  public void print(Printer printer, List<Boolean> indentFlag) throws 
JSONException, Exception {
     // print stagename
-    if (TezJsonParser.printSet.contains(this)) {
-      out.println(TezJsonParser.prefixString(indentFlag) + " Please refer to 
the previous "
-          + this.name);
+    if (parser.printSet.contains(this)) {
+      printer.println(TezJsonParser.prefixString(indentFlag) + " Please refer 
to the previous "
+          + externalName);
       return;
     }
-    TezJsonParser.printSet.add(this);
-    out.println(TezJsonParser.prefixString(indentFlag) + this.name);
+    parser.printSet.add(this);
+    printer.println(TezJsonParser.prefixString(indentFlag) + externalName);
     // print vertexes
     List<Boolean> nextIndentFlag = new ArrayList<>();
     nextIndentFlag.addAll(indentFlag);
     nextIndentFlag.add(false);
     for (Vertex candidate : this.vertexs.values()) {
-      if (!TezJsonParser.isInline(candidate) && candidate.children.isEmpty()) {
-        candidate.print(out, nextIndentFlag, null, null);
+      if (!parser.isInline(candidate) && candidate.children.isEmpty()) {
+        candidate.print(printer, nextIndentFlag, null, null);
       }
     }
     if (!attrs.isEmpty()) {
       Collections.sort(attrs);
       for (Attr attr : attrs) {
-        out.println(TezJsonParser.prefixString(nextIndentFlag) + 
attr.toString());
+        printer.println(TezJsonParser.prefixString(nextIndentFlag) + 
attr.toString());
       }
     }
     if (op != null) {
-      op.print(out, nextIndentFlag, false);
+      op.print(printer, nextIndentFlag, false);
     }
     nextIndentFlag.add(false);
     // print dependent stages
     for (Stage stage : this.parentStages) {
-      stage.print(out, nextIndentFlag);
+      stage.print(printer, nextIndentFlag);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
----------------------------------------------------------------------
diff --git 
a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
 
b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
index 43ddff3..c6ee4f6 100644
--- 
a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
+++ 
b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java
@@ -36,28 +36,28 @@ import org.codehaus.jackson.map.JsonMappingException;
 import org.json.JSONException;
 import org.json.JSONObject;
 
-public class TezJsonParser implements JsonParser {
-  JSONObject inputObject;
-  Map<String, Stage> stages;
-  PrintStream outputStream;
+public final class TezJsonParser implements JsonParser {
+  public final Map<String, Stage> stages = new HashMap<String, Stage>();;
   protected final Log LOG;
   // the object that has been printed.
-  public static Set<Object> printSet = new HashSet<>();
-  // the vertex that should be inlined. <Operator, list of Vertex that is 
inlined>
-  public static Map<Op, List<Connection>> inlineMap = new HashMap<>();
+  public final Set<Object> printSet = new HashSet<>();
+  // the vertex that should be inlined. <Operator, list of Vertex that is
+  // inlined>
+  public final Map<Op, List<Connection>> inlineMap = new HashMap<>();
+
   public TezJsonParser() {
     super();
     LOG = LogFactory.getLog(this.getClass().getName());
   }
-  public void extractStagesAndPlans() throws JSONException, JsonParseException,
-      JsonMappingException, Exception, IOException {
+
+  public void extractStagesAndPlans(JSONObject inputObject) throws 
JSONException,
+      JsonParseException, JsonMappingException, Exception, IOException {
     // extract stages
-    this.stages = new HashMap<String, Stage>();
     JSONObject dependency = inputObject.getJSONObject("STAGE DEPENDENCIES");
-    if (dependency.length() > 0) {
+    if (dependency != null && dependency.length() > 0) {
       // iterate for the first time to get all the names of stages.
       for (String stageName : JSONObject.getNames(dependency)) {
-        this.stages.put(stageName, new Stage(stageName));
+        this.stages.put(stageName, new Stage(stageName, this));
       }
       // iterate for the second time to get all the dependency.
       for (String stageName : JSONObject.getNames(dependency)) {
@@ -67,7 +67,7 @@ public class TezJsonParser implements JsonParser {
     }
     // extract stage plans
     JSONObject stagePlans = inputObject.getJSONObject("STAGE PLANS");
-    if (stagePlans.length() > 0) {
+    if (stagePlans != null && stagePlans.length() > 0) {
       for (String stageName : JSONObject.getNames(stagePlans)) {
         JSONObject stagePlan = stagePlans.getJSONObject(stageName);
         this.stages.get(stageName).extractVertex(stagePlan);
@@ -77,8 +77,8 @@ public class TezJsonParser implements JsonParser {
 
   /**
    * @param indentFlag
-   * help to generate correct indent
-   * @return 
+   *          help to generate correct indent
+   * @return
    */
   public static String prefixString(List<Boolean> indentFlag) {
     StringBuilder sb = new StringBuilder();
@@ -94,7 +94,7 @@ public class TezJsonParser implements JsonParser {
   /**
    * @param indentFlag
    * @param tail
-   * help to generate correct indent with a specific tail
+   *          help to generate correct indent with a specific tail
    * @return
    */
   public static String prefixString(List<Boolean> indentFlag, String tail) {
@@ -111,19 +111,18 @@ public class TezJsonParser implements JsonParser {
 
   @Override
   public void print(JSONObject inputObject, PrintStream outputStream) throws 
Exception {
-    LOG.info("JsonParser is parsing\n" + inputObject.toString());
-    this.inputObject = inputObject;
-    this.outputStream = outputStream;
-    this.extractStagesAndPlans();
+    LOG.info("JsonParser is parsing:" + inputObject.toString());
+    this.extractStagesAndPlans(inputObject);
+    Printer printer = new Printer();
     // print out the cbo info
     if (inputObject.has("cboInfo")) {
-      outputStream.println(inputObject.getString("cboInfo"));
-      outputStream.println();
+      printer.println(inputObject.getString("cboInfo"));
+      printer.println();
     }
     // print out the vertex dependency in root stage
     for (Stage candidate : this.stages.values()) {
       if (candidate.tezStageDependency != null && 
candidate.tezStageDependency.size() > 0) {
-        outputStream.println("Vertex dependency in root stage");
+        printer.println("Vertex dependency in root stage");
         for (Entry<Vertex, List<Connection>> entry : 
candidate.tezStageDependency.entrySet()) {
           StringBuilder sb = new StringBuilder();
           sb.append(entry.getKey().name);
@@ -137,21 +136,22 @@ public class TezJsonParser implements JsonParser {
             }
             sb.append(connection.from.name + " (" + connection.type + ")");
           }
-          outputStream.println(sb.toString());
+          printer.println(sb.toString());
         }
-        outputStream.println();
+        printer.println();
       }
     }
     List<Boolean> indentFlag = new ArrayList<>();
     // print out all the stages that have no childStages.
     for (Stage candidate : this.stages.values()) {
       if (candidate.childStages.isEmpty()) {
-        candidate.print(outputStream, indentFlag);
+        candidate.print(printer, indentFlag);
       }
     }
+    outputStream.println(printer.toString());
   }
 
-  public static void addInline(Op op, Connection connection) {
+  public void addInline(Op op, Connection connection) {
     List<Connection> list = inlineMap.get(op);
     if (list == null) {
       list = new ArrayList<>();
@@ -161,10 +161,11 @@ public class TezJsonParser implements JsonParser {
       list.add(connection);
     }
   }
-  public static boolean isInline(Vertex v) {
-    for(List<Connection> list : inlineMap.values()){
+
+  public boolean isInline(Vertex v) {
+    for (List<Connection> list : inlineMap.values()) {
       for (Connection connection : list) {
-        if(connection.from.equals(v)){
+        if (connection.from.equals(v)) {
           return true;
         }
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
----------------------------------------------------------------------
diff --git 
a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java 
b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
index 9b3405e..67ff8eb 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.common.jsonexplain.tez;
 
 import java.io.IOException;
-import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -29,28 +28,30 @@ import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;
 
-public class Vertex {
-  public String name;
+public final class Vertex implements Comparable<Vertex>{
+  public final String name;
+  //tezJsonParser
+  public final TezJsonParser parser;
   // vertex's parent connections.
-  public List<Connection> parentConnections;
+  public final List<Connection> parentConnections = new ArrayList<>();
   // vertex's children vertex.
-  public List<Vertex> children;
+  public final List<Vertex> children = new ArrayList<>();
   // the jsonObject for this vertex
-  public JSONObject vertexObject;
+  public final JSONObject vertexObject;
   // whether this vertex is a union vertex
   public boolean union;
   // whether this vertex is dummy (which does not really exists but is 
created),
   // e.g., a dummy vertex for a mergejoin branch
   public boolean dummy;
   // the rootOps in this vertex
-  public List<Op> rootOps;
+  public final List<Op> rootOps = new ArrayList<>();
   // we create a dummy vertex for a mergejoin branch for a self join if this
   // vertex is a mergejoin
-  public List<Vertex> mergeJoinDummyVertexs;
+  public final List<Vertex> mergeJoinDummyVertexs = new ArrayList<>();
   // whether this vertex has multiple reduce operators
-  boolean hasMultiReduceOp;
+  public boolean hasMultiReduceOp = false;
 
-  public Vertex(String name, JSONObject vertexObject) {
+  public Vertex(String name, JSONObject vertexObject, TezJsonParser 
tezJsonParser) {
     super();
     this.name = name;
     if (this.name != null && this.name.contains("Union")) {
@@ -60,11 +61,7 @@ public class Vertex {
     }
     this.dummy = false;
     this.vertexObject = vertexObject;
-    this.parentConnections = new ArrayList<>();
-    this.children = new ArrayList<>();
-    this.rootOps = new ArrayList<>();
-    this.mergeJoinDummyVertexs = new ArrayList<>();
-    this.hasMultiReduceOp = false;
+    parser = tezJsonParser;
   }
 
   public void addDependency(Connection connection) throws JSONException {
@@ -88,20 +85,26 @@ public class Vertex {
           extractOp(vertexObject.getJSONArray(key).getJSONObject(0));
         } else if (key.equals("Reduce Operator Tree:") || 
key.equals("Processor Tree:")) {
           extractOp(vertexObject.getJSONObject(key));
-        }
-        // this is the case when we have a map-side SMB join
-        // one input of the join is treated as a dummy vertex
-        else if (key.equals("Join:")) {
+        } else if (key.equals("Join:")) {
+          // this is the case when we have a map-side SMB join
+          // one input of the join is treated as a dummy vertex
           JSONArray array = vertexObject.getJSONArray(key);
           for (int index = 0; index < array.length(); index++) {
             JSONObject mpOpTree = array.getJSONObject(index);
-            Vertex v = new Vertex("", mpOpTree);
+            Vertex v = new Vertex("", mpOpTree, parser);
             v.extractOpTree();
             v.dummy = true;
             mergeJoinDummyVertexs.add(v);
           }
+        } else if (key.equals("Merge File Operator")) {
+          JSONObject opTree = vertexObject.getJSONObject(key);
+          if (opTree.has("Map Operator Tree:")) {
+            extractOp(opTree.getJSONArray("Map Operator 
Tree:").getJSONObject(0));
+          } else {
+            throw new Exception("Merge File Operator does not have a Map 
Operator Tree");
+          }
         } else {
-          throw new Exception("unsupported operator tree in vertex " + 
this.name);
+          throw new Exception("Unsupported operator tree in vertex " + 
this.name);
         }
       }
     }
@@ -159,7 +162,7 @@ public class Vertex {
           }
         }
       }
-      Op op = new Op(opName, id, outputVertexName, children, attrs, operator, 
this);
+      Op op = new Op(opName, id, outputVertexName, children, attrs, operator, 
this, parser);
       if (!children.isEmpty()) {
         for (Op child : children) {
           child.parent = op;
@@ -171,24 +174,24 @@ public class Vertex {
     }
   }
 
-  public void print(PrintStream out, List<Boolean> indentFlag, String type, 
Vertex callingVertex)
+  public void print(Printer printer, List<Boolean> indentFlag, String type, 
Vertex callingVertex)
       throws JSONException, Exception {
     // print vertexname
-    if (TezJsonParser.printSet.contains(this) && !hasMultiReduceOp) {
+    if (parser.printSet.contains(this) && !hasMultiReduceOp) {
       if (type != null) {
-        out.println(TezJsonParser.prefixString(indentFlag, "|<-")
+        printer.println(TezJsonParser.prefixString(indentFlag, "|<-")
             + " Please refer to the previous " + this.name + " [" + type + 
"]");
       } else {
-        out.println(TezJsonParser.prefixString(indentFlag, "|<-")
+        printer.println(TezJsonParser.prefixString(indentFlag, "|<-")
             + " Please refer to the previous " + this.name);
       }
       return;
     }
-    TezJsonParser.printSet.add(this);
+    parser.printSet.add(this);
     if (type != null) {
-      out.println(TezJsonParser.prefixString(indentFlag, "|<-") + this.name + 
" [" + type + "]");
+      printer.println(TezJsonParser.prefixString(indentFlag, "|<-") + 
this.name + " [" + type + "]");
     } else if (this.name != null) {
-      out.println(TezJsonParser.prefixString(indentFlag) + this.name);
+      printer.println(TezJsonParser.prefixString(indentFlag) + this.name);
     }
     // print operators
     if (hasMultiReduceOp && !callingVertex.union) {
@@ -200,7 +203,7 @@ public class Vertex {
         }
       }
       if (choose != null) {
-        choose.print(out, indentFlag, false);
+        choose.print(printer, indentFlag, false);
       } else {
         throw new Exception("Can not find the right reduce output operator for 
vertex " + this.name);
       }
@@ -208,9 +211,9 @@ public class Vertex {
       for (Op op : this.rootOps) {
         // dummy vertex is treated as a branch of a join operator
         if (this.dummy) {
-          op.print(out, indentFlag, true);
+          op.print(printer, indentFlag, true);
         } else {
-          op.print(out, indentFlag, false);
+          op.print(printer, indentFlag, false);
         }
       }
     }
@@ -225,7 +228,7 @@ public class Vertex {
         } else {
           unionFlag.add(false);
         }
-        connection.from.print(out, unionFlag, connection.type, this);
+        connection.from.print(printer, unionFlag, connection.type, this);
       }
     }
   }
@@ -245,4 +248,10 @@ public class Vertex {
     }
     this.hasMultiReduceOp = true;
   }
+
+  //The following code should be gone after HIVE-11075 using topological order
+  @Override
+  public int compareTo(Vertex o) {
+    return this.name.compareTo(o.name);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index e86f779..ad469c2 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1705,7 +1705,7 @@ public class HiveConf extends Configuration {
     HIVE_LOG_EXPLAIN_OUTPUT("hive.log.explain.output", false,
         "Whether to log explain output for every query.\n" +
         "When enabled, will log EXPLAIN EXTENDED output for the query at INFO 
log4j log level."),
-    HIVE_EXPLAIN_USER("hive.explain.user", false,
+    HIVE_EXPLAIN_USER("hive.explain.user", true,
         "Whether to show explain result at user level.\n" +
         "When enabled, will log EXPLAIN output for the query at user level."),
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 7b7559a..14ef33b 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -321,6 +321,7 @@ minitez.query.files=bucket_map_join_tez1.q,\
   dynamic_partition_pruning_2.q,\
   explainuser_1.q,\
   explainuser_2.q,\
+  explainuser_3.q,\
   hybridgrace_hashjoin_1.q,\
   hybridgrace_hashjoin_2.q,\
   mapjoin_decimal.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
index 35c4cfc..c6b49bf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java
@@ -39,6 +39,8 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.jsonexplain.JsonParser;
 import org.apache.hadoop.hive.common.jsonexplain.JsonParserFactory;
@@ -49,7 +51,6 @@ import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.physical.StageIDsRearranger;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
 import org.apache.hadoop.hive.ql.plan.ExplainWork;
@@ -76,9 +77,11 @@ public class ExplainTask extends Task<ExplainWork> 
implements Serializable {
   public static final String EXPL_COLUMN_NAME = "Explain";
   private final Set<Operator<?>> visitedOps = new HashSet<Operator<?>>();
   private boolean isLogical = false;
+  protected final Log LOG;
 
   public ExplainTask() {
     super();
+    LOG = LogFactory.getLog(this.getClass().getName());
   }
 
   /*
@@ -288,28 +291,29 @@ public class ExplainTask extends Task<ExplainWork> 
implements Serializable {
         JSONObject jsonDependencies = getJSONDependencies(work);
         out.print(jsonDependencies);
       } else {
-        if (work.getDependency()) {
-          JSONObject jsonDependencies = getJSONDependencies(work);
-          out.print(jsonDependencies);
+        if (work.isUserLevelExplain()) {
+          // Because of the implementation of the JsonParserFactory, we are 
sure
+          // that we can get a TezJsonParser.
+          JsonParser jsonParser = JsonParserFactory.getParser(conf);
+          work.setFormatted(true);
+          JSONObject jsonPlan = getJSONPlan(out, work);
+          if (work.getCboInfo() != null) {
+            jsonPlan.put("cboInfo", work.getCboInfo());
+          }
+          try {
+            jsonParser.print(jsonPlan, out);
+          } catch (Exception e) {
+            // if there is anything wrong happen, we bail out.
+            LOG.error("Running explain user level has problem: " + e.toString()
+                + ". Falling back to normal explain");
+            work.setFormatted(false);
+            work.setUserLevelExplain(false);
+            jsonPlan = getJSONPlan(out, work);
+          }
         } else {
-          if (work.isUserLevelExplain()) {
-            JsonParser jsonParser = JsonParserFactory.getParser(conf);
-            if (jsonParser != null) {
-              work.setFormatted(true);
-              JSONObject jsonPlan = getJSONPlan(out, work);
-              if (work.getCboInfo() != null) {
-                jsonPlan.put("cboInfo", work.getCboInfo());
-              }
-              jsonParser.print(jsonPlan, out);
-            } else {
-              throw new SemanticException(
-                  "Hive UserLevelExplain only supports tez engine right now.");
-            }
-          } else {
-            JSONObject jsonPlan = getJSONPlan(out, work);
-            if (work.isFormatted()) {
-              out.print(jsonPlan);
-            }
+          JSONObject jsonPlan = getJSONPlan(out, work);
+          if (work.isFormatted()) {
+            out.print(jsonPlan);
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
index 3fbc8de..66d1546 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
@@ -91,8 +91,13 @@ public class ExplainSemanticAnalyzer extends 
BaseSemanticAnalyzer {
       pCtx = ((SemanticAnalyzer)sem).getParseContext();
     }
 
-    boolean userLevelExplain = !extended && !formatted && !dependency && 
!logical && !authorize
-        && HiveConf.getBoolVar(ctx.getConf(), 
HiveConf.ConfVars.HIVE_EXPLAIN_USER);
+    boolean userLevelExplain = !extended
+        && !formatted
+        && !dependency
+        && !logical
+        && !authorize
+        && (HiveConf.getBoolVar(ctx.getConf(), 
HiveConf.ConfVars.HIVE_EXPLAIN_USER) && HiveConf
+            .getVar(conf, 
HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez"));
     ExplainWork work = new ExplainWork(ctx.getResFile(),
         pCtx,
         tasks,

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java
index 7a561e6..58120d3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java
@@ -21,11 +21,13 @@ package org.apache.hadoop.hive.ql.plan;
 import java.io.Serializable;
 import java.util.List;
 
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
 /**
  * ConditionalWork.
  *
  */
-@Explain(displayName = "Conditional Operator")
+@Explain(displayName = "Conditional Operator", explainLevels = { Level.USER, 
Level.DEFAULT, Level.EXTENDED })
 public class ConditionalWork implements Serializable {
   private static final long serialVersionUID = 1L;
   List<? extends Serializable> listWorks;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/auto_join0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_join0.q 
b/ql/src/test/queries/clientpositive/auto_join0.q
index 008f9e3..24647fc 100644
--- a/ql/src/test/queries/clientpositive/auto_join0.q
+++ b/ql/src/test/queries/clientpositive/auto_join0.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.auto.convert.join = true;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/auto_join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_join1.q 
b/ql/src/test/queries/clientpositive/auto_join1.q
index 7414005..3aec73f 100644
--- a/ql/src/test/queries/clientpositive/auto_join1.q
+++ b/ql/src/test/queries/clientpositive/auto_join1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.auto.convert.join =true;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/auto_join21.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_join21.q 
b/ql/src/test/queries/clientpositive/auto_join21.q
index 17e8a88..10ac490 100644
--- a/ql/src/test/queries/clientpositive/auto_join21.q
+++ b/ql/src/test/queries/clientpositive/auto_join21.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.auto.convert.join = true;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/auto_join29.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_join29.q 
b/ql/src/test/queries/clientpositive/auto_join29.q
index c9eb9b0..f991540 100644
--- a/ql/src/test/queries/clientpositive/auto_join29.q
+++ b/ql/src/test/queries/clientpositive/auto_join29.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.auto.convert.join = true;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/auto_join30.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_join30.q 
b/ql/src/test/queries/clientpositive/auto_join30.q
index 9e31f0f..7ac3c0e 100644
--- a/ql/src/test/queries/clientpositive/auto_join30.q
+++ b/ql/src/test/queries/clientpositive/auto_join30.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.auto.convert.join = true;
 
 explain

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
index 83e1cec..c07dd23 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.enforce.bucketing = true;
 set hive.enforce.sorting = true;
 set hive.exec.reducers.max = 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
index e92504a..f35fec1 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.enforce.bucketing = true;
 set hive.enforce.sorting = true;
 set hive.exec.reducers.max = 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
index 4cda4df..eabeff0 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.enforce.bucketing = true;
 set hive.enforce.sorting = true;
 set hive.exec.reducers.max = 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
index c7bcae6..a553d93 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.enforce.bucketing = true;
 set hive.enforce.sorting = true;
 set hive.exec.reducers.max = 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
index e3766e7..9eb85d3 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.enforce.bucketing = true;
 set hive.enforce.sorting = true;
 set hive.exec.reducers.max = 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/bucket2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket2.q 
b/ql/src/test/queries/clientpositive/bucket2.q
index f9f1627..ecd7e53 100644
--- a/ql/src/test/queries/clientpositive/bucket2.q
+++ b/ql/src/test/queries/clientpositive/bucket2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.enforce.bucketing = true;
 set hive.exec.reducers.max = 1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/bucket3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket3.q 
b/ql/src/test/queries/clientpositive/bucket3.q
index b0f89c8..7b7a9c3 100644
--- a/ql/src/test/queries/clientpositive/bucket3.q
+++ b/ql/src/test/queries/clientpositive/bucket3.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.enforce.bucketing = true;
 set hive.exec.reducers.max = 1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/bucket4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket4.q 
b/ql/src/test/queries/clientpositive/bucket4.q
index 2b3f805..1b49c7a 100644
--- a/ql/src/test/queries/clientpositive/bucket4.q
+++ b/ql/src/test/queries/clientpositive/bucket4.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing = true;
 set hive.enforce.sorting = true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q 
b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
index 42e26a8..4a7d63e 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q 
b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
index a3588ec..2f968bd 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/correlationoptimizer1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/correlationoptimizer1.q 
b/ql/src/test/queries/clientpositive/correlationoptimizer1.q
index 0596f96..51d2c10 100644
--- a/ql/src/test/queries/clientpositive/correlationoptimizer1.q
+++ b/ql/src/test/queries/clientpositive/correlationoptimizer1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.auto.convert.join=false;
 set hive.optimize.correlation=false;
 -- This query has a GroupByOperator following JoinOperator and they share the 
same keys.

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/count.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/count.q 
b/ql/src/test/queries/clientpositive/count.q
index 18721e5..ded8be8 100644
--- a/ql/src/test/queries/clientpositive/count.q
+++ b/ql/src/test/queries/clientpositive/count.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 create table abcd (a int, b int, c int, d int);
 LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/cross_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cross_join.q 
b/ql/src/test/queries/clientpositive/cross_join.q
index 1f888dd..8eb949e 100644
--- a/ql/src/test/queries/clientpositive/cross_join.q
+++ b/ql/src/test/queries/clientpositive/cross_join.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- current
 explain select src.key from src join src src2;
 -- ansi cross join

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/cross_product_check_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cross_product_check_1.q b/ql/src/test/queries/clientpositive/cross_product_check_1.q
index 17a8833..e39912b 100644
--- a/ql/src/test/queries/clientpositive/cross_product_check_1.q
+++ b/ql/src/test/queries/clientpositive/cross_product_check_1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 
 create table A as

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/cross_product_check_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cross_product_check_2.q b/ql/src/test/queries/clientpositive/cross_product_check_2.q
index de6b7f2..d7d251f 100644
--- a/ql/src/test/queries/clientpositive/cross_product_check_2.q
+++ b/ql/src/test/queries/clientpositive/cross_product_check_2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 
 create table A as

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/ctas.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/ctas.q b/ql/src/test/queries/clientpositive/ctas.q
index 3435d03..edd1f6a 100644
--- a/ql/src/test/queries/clientpositive/ctas.q
+++ b/ql/src/test/queries/clientpositive/ctas.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
index 7baca1a..d7f9ac8 100644
--- a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
+++ b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing = true;
 set hive.exec.reducers.max = 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
index 376e893..67c4740 100644
--- a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
+++ b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.optimize.ppd=true;
 set hive.ppd.remove.duplicatefilters=true;
 set hive.tez.dynamic.partition.pruning=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q b/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q
index a4e84b1..4a9532d 100644
--- a/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q
+++ b/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.optimize.ppd=true;
 set hive.ppd.remove.duplicatefilters=true;
 set hive.tez.dynamic.partition.pruning=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
index 78816ae..8001081 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.optimize.sort.dynamic.partition=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions=1000;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
index e459583..f842efe 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.optimize.sort.dynamic.partition=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions=1000;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
index 58319e3..5a504ec 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.optimize.sort.dynamic.partition=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions=1000;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/explainuser_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_3.q b/ql/src/test/queries/clientpositive/explainuser_3.q
new file mode 100644
index 0000000..16237bb
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/explainuser_3.q
@@ -0,0 +1,115 @@
+set hive.explain.user=true;
+
+explain select key, value
+FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol;
+
+explain show tables;
+
+explain create database newDB location "/tmp/";
+
+create database newDB location "/tmp/";
+
+explain describe database extended newDB;
+
+describe database extended newDB;
+
+explain use newDB;
+
+use newDB;
+
+create table tab (name string);
+
+explain alter table tab rename to newName;
+
+explain drop table tab;
+
+drop table tab;
+
+explain use default;
+
+use default;
+
+drop database newDB;
+
+explain analyze table src compute statistics;
+
+explain analyze table src compute statistics for columns;
+
+explain
+CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x));
+
+CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x));
+
+EXPLAIN SELECT SIGMOID(2) FROM src LIMIT 1;
+explain DROP TEMPORARY MACRO SIGMOID;
+DROP TEMPORARY MACRO SIGMOID;
+
+explain create table src_autho_test as select * from src;
+create table src_autho_test as select * from src;
+
+set hive.security.authorization.enabled=true;
+
+explain grant select on table src_autho_test to user hive_test_user;
+grant select on table src_autho_test to user hive_test_user;
+
+explain show grant user hive_test_user on table src_autho_test;
+explain show grant user hive_test_user on table src_autho_test(key);
+
+select key from src_autho_test order by key limit 20;
+
+explain revoke select on table src_autho_test from user hive_test_user;
+
+explain grant select(key) on table src_autho_test to user hive_test_user;
+
+explain revoke select(key) on table src_autho_test from user hive_test_user;
+
+explain 
+create role sRc_roLE;
+
+create role sRc_roLE;
+
+explain
+grant role sRc_roLE to user hive_test_user;
+
+grant role sRc_roLE to user hive_test_user;
+
+explain show role grant user hive_test_user;
+
+explain drop role sRc_roLE;
+drop role sRc_roLE;
+
+set hive.security.authorization.enabled=false;
+drop table src_autho_test;
+
+explain drop view v;
+
+explain create view v as with cte as (select * from src  order by key limit 5)
+select * from cte;
+
+explain with cte as (select * from src  order by key limit 5)
+select * from cte;
+
+create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SET mapred.min.split.size=1000;
+SET mapred.max.split.size=50000;
+SET hive.optimize.index.filter=true;
+set hive.merge.orcfile.stripe.level=false;
+set hive.merge.tezfiles=false;
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+set hive.compute.splits.in.am=true;
+set tez.grouping.min-size=1000;
+set tez.grouping.max-size=50000;
+
+set hive.merge.orcfile.stripe.level=true;
+set hive.merge.tezfiles=true;
+set hive.merge.mapfiles=true;
+set hive.merge.mapredfiles=true;
+
+explain insert overwrite table orc_merge5 select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+
+drop table orc_merge5;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/groupby1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby1.q b/ql/src/test/queries/clientpositive/groupby1.q
index 15f776f..df69cbd 100755
--- a/ql/src/test/queries/clientpositive/groupby1.q
+++ b/ql/src/test/queries/clientpositive/groupby1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.map.aggr=false;
 set hive.groupby.skewindata=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/groupby2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby2.q b/ql/src/test/queries/clientpositive/groupby2.q
index 2bf0d7a..1966ee7 100755
--- a/ql/src/test/queries/clientpositive/groupby2.q
+++ b/ql/src/test/queries/clientpositive/groupby2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.map.aggr=false;
 set hive.groupby.skewindata=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/groupby3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby3.q b/ql/src/test/queries/clientpositive/groupby3.q
index 8f24584..a9b4039 100755
--- a/ql/src/test/queries/clientpositive/groupby3.q
+++ b/ql/src/test/queries/clientpositive/groupby3.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.map.aggr=false;
 set hive.groupby.skewindata=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/having.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/having.q b/ql/src/test/queries/clientpositive/having.q
index 6abc8ae..fdba5cd 100644
--- a/ql/src/test/queries/clientpositive/having.q
+++ b/ql/src/test/queries/clientpositive/having.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3;
 SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_1.q b/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_1.q
index c7d925e..f98dfa9 100644
--- a/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_1.q
+++ b/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- Hybrid Grace Hash Join
 -- Test basic functionalities:
 -- 1. Various cases when hash partitions spill

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q b/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q
index b3ee414..d6a5250 100644
--- a/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q
+++ b/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- Hybrid Grace Hash Join
 -- Test n-way join
 SELECT 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/insert_into1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_into1.q b/ql/src/test/queries/clientpositive/insert_into1.q
index 7271a07..1b7db5c 100644
--- a/ql/src/test/queries/clientpositive/insert_into1.q
+++ b/ql/src/test/queries/clientpositive/insert_into1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.compute.query.using.stats=true;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/insert_into2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/insert_into2.q b/ql/src/test/queries/clientpositive/insert_into2.q
index a53f7f4..7183c75 100644
--- a/ql/src/test/queries/clientpositive/insert_into2.q
+++ b/ql/src/test/queries/clientpositive/insert_into2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.compute.query.using.stats=true;
 DROP TABLE insert_into2;
 CREATE TABLE insert_into2 (key int, value string) 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/join0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join0.q b/ql/src/test/queries/clientpositive/join0.q
index 5d8356b..6ef6843 100644
--- a/ql/src/test/queries/clientpositive/join0.q
+++ b/ql/src/test/queries/clientpositive/join0.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- JAVA_VERSION_SPECIFIC_OUTPUT
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/join1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join1.q b/ql/src/test/queries/clientpositive/join1.q
index a388683..de97e8c 100644
--- a/ql/src/test/queries/clientpositive/join1.q
+++ b/ql/src/test/queries/clientpositive/join1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
 -- SORT_QUERY_RESULTS

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/join_nullsafe.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join_nullsafe.q b/ql/src/test/queries/clientpositive/join_nullsafe.q
index 46bbadd..d6eda77 100644
--- a/ql/src/test/queries/clientpositive/join_nullsafe.q
+++ b/ql/src/test/queries/clientpositive/join_nullsafe.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 
 CREATE TABLE myinput1(key int, value int);

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/limit_pushdown.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/limit_pushdown.q b/ql/src/test/queries/clientpositive/limit_pushdown.q
index 3940564..74030e3 100644
--- a/ql/src/test/queries/clientpositive/limit_pushdown.q
+++ b/ql/src/test/queries/clientpositive/limit_pushdown.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.limit.pushdown.memory.usage=0.3f;
 set hive.optimize.reducededuplication.min.reducer=1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/load_dyn_part1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part1.q b/ql/src/test/queries/clientpositive/load_dyn_part1.q
index df1ed31..68323ab 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part1.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 
 show partitions srcpart;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/load_dyn_part2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part2.q b/ql/src/test/queries/clientpositive/load_dyn_part2.q
index eb4e2d5..e804971 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part2.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 
 create table if not exists nzhang_part_bucket (key string, value string) 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/load_dyn_part3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part3.q b/ql/src/test/queries/clientpositive/load_dyn_part3.q
index 4fb3860..07423fd 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part3.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part3.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 
 show partitions srcpart;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/lvj_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lvj_mapjoin.q b/ql/src/test/queries/clientpositive/lvj_mapjoin.q
index 4a391b4..b726e2a 100644
--- a/ql/src/test/queries/clientpositive/lvj_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/lvj_mapjoin.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 
 drop table sour1;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/mapjoin_decimal.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mapjoin_decimal.q b/ql/src/test/queries/clientpositive/mapjoin_decimal.q
index 7299808..105195b 100644
--- a/ql/src/test/queries/clientpositive/mapjoin_decimal.q
+++ b/ql/src/test/queries/clientpositive/mapjoin_decimal.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000000;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q b/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
index 7f66ff2..9723b3a 100644
--- a/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/mapreduce1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mapreduce1.q b/ql/src/test/queries/clientpositive/mapreduce1.q
index 83328f1..e2b314f 100644
--- a/ql/src/test/queries/clientpositive/mapreduce1.q
+++ b/ql/src/test/queries/clientpositive/mapreduce1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/mapreduce2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mapreduce2.q b/ql/src/test/queries/clientpositive/mapreduce2.q
index 7539d3f..0398b49 100644
--- a/ql/src/test/queries/clientpositive/mapreduce2.q
+++ b/ql/src/test/queries/clientpositive/mapreduce2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/merge1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/merge1.q b/ql/src/test/queries/clientpositive/merge1.q
index 3000262..847a50b 100644
--- a/ql/src/test/queries/clientpositive/merge1.q
+++ b/ql/src/test/queries/clientpositive/merge1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.merge.mapredfiles=true;
 set hive.merge.sparkfiles=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/merge2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/merge2.q b/ql/src/test/queries/clientpositive/merge2.q
index b0f01ce..c36a909 100644
--- a/ql/src/test/queries/clientpositive/merge2.q
+++ b/ql/src/test/queries/clientpositive/merge2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.merge.sparkfiles=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/mergejoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mergejoin.q b/ql/src/test/queries/clientpositive/mergejoin.q
index f89f413..7550e09 100644
--- a/ql/src/test/queries/clientpositive/mergejoin.q
+++ b/ql/src/test/queries/clientpositive/mergejoin.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.join.emit.interval=100000;
 set hive.optimize.ppd=true;
 set hive.ppd.remove.duplicatefilters=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/metadata_only_queries.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/metadata_only_queries.q b/ql/src/test/queries/clientpositive/metadata_only_queries.q
index c7ae739..56f3a78 100644
--- a/ql/src/test/queries/clientpositive/metadata_only_queries.q
+++ b/ql/src/test/queries/clientpositive/metadata_only_queries.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.stats.dbclass=fs;
 set hive.compute.query.using.stats=true;
 set hive.stats.autogather=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/mrr.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mrr.q b/ql/src/test/queries/clientpositive/mrr.q
index bd379d2..6960547 100644
--- a/ql/src/test/queries/clientpositive/mrr.q
+++ b/ql/src/test/queries/clientpositive/mrr.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 -- simple query with multiple reduce stages
 -- SORT_QUERY_RESULTS
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/orc_merge1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge1.q b/ql/src/test/queries/clientpositive/orc_merge1.q
index 1c0bf41..a8ac85b 100644
--- a/ql/src/test/queries/clientpositive/orc_merge1.q
+++ b/ql/src/test/queries/clientpositive/orc_merge1.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.merge.orcfile.stripe.level=false;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/orc_merge2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge2.q b/ql/src/test/queries/clientpositive/orc_merge2.q
index 9ffc4bc..44ef280 100644
--- a/ql/src/test/queries/clientpositive/orc_merge2.q
+++ b/ql/src/test/queries/clientpositive/orc_merge2.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.merge.orcfile.stripe.level=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;

http://git-wip-us.apache.org/repos/asf/hive/blob/8842dcaf/ql/src/test/queries/clientpositive/orc_merge3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge3.q b/ql/src/test/queries/clientpositive/orc_merge3.q
index 444ea65..9722e6d 100644
--- a/ql/src/test/queries/clientpositive/orc_merge3.q
+++ b/ql/src/test/queries/clientpositive/orc_merge3.q
@@ -1,3 +1,4 @@
+set hive.explain.user=false;
 set hive.merge.orcfile.stripe.level=true;
 
 DROP TABLE orcfile_merge3a;

Reply via email to