HIVE-12727 : refactor Hive strict checks to be more granular, allow order by no 
limit and no partition filter by default for now (Sergey Shelukhin, reviewed by 
Xuefu Zhang)
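
For reference, the knobs introduced here can be toggled per session like any other
setting; a minimal illustration (property names and defaults taken from the
HiveConf.java hunk below; an explicitly set hive.mapred.mode still overrides all
three via StrictChecks.isAllowed):

    -- mirror the new defaults
    set hive.strict.checks.large.query=false;
    set hive.strict.checks.type.safety=true;
    set hive.strict.checks.cartesian.product=true;
    -- the deprecated coarse switch, if set, takes precedence
    set hive.mapred.mode=nonstrict;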


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/27800976
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/27800976
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/27800976

Branch: refs/heads/branch-2.0
Commit: 27800976df202668835436fb69a663311050af4f
Parents: 2961b0a
Author: Sergey Shelukhin <ser...@apache.org>
Authored: Fri Jan 29 12:17:08 2016 -0800
Committer: Sergey Shelukhin <ser...@apache.org>
Committed: Fri Jan 29 12:17:08 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   | 65 +++++++++++--
 .../calcite/translator/HiveOpConverter.java     | 42 ++++-----
 .../hive/ql/optimizer/ppr/PartitionPruner.java  | 12 ++-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    | 19 ++--
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 20 ++--
 .../hive/ql/plan/ExprNodeGenericFuncDesc.java   | 19 ++--
 .../queries/clientnegative/invalid_distinct1.q  |  1 +
 .../queries/clientnegative/invalid_distinct3.q  |  1 +
 .../clientnegative/alter_view_failure6.q.out    |  2 +-
 .../clientnegative/compare_double_bigint.q.out  |  2 +-
 .../clientnegative/compare_string_bigint.q.out  |  2 +-
 ql/src/test/results/clientnegative/input4.q.out |  2 +-
 .../clientnegative/input_part0_neg.q.out        |  2 +-
 .../clientnegative/invalid_distinct1.q.out      |  2 +-
 .../clientnegative/invalid_distinct3.q.out      |  2 +-
 .../results/clientnegative/strict_join.q.out    |  2 +-
 .../results/clientnegative/strict_orderby.q.out |  2 +-
 .../results/clientnegative/strict_pruning.q.out |  2 +-
 .../subquery_notexists_implicit_gby.q.out       |  2 +-
 .../clientpositive/create_genericudf.q.out      |  8 +-
 ql/src/test/results/clientpositive/cte_2.q.out  |  2 +-
 .../results/clientpositive/empty_join.q.out     | 83 ++++++++++-------
 .../clientpositive/groupby_duplicate_key.q.out  |  4 +-
 .../test/results/clientpositive/insert1.q.out   |  2 +-
 .../join_cond_pushdown_unqual5.q.out            | 96 +++++++++++---------
 .../clientpositive/literal_decimal.q.out        |  6 +-
 .../clientpositive/load_dyn_part14.q.out        | 18 ++--
 .../results/clientpositive/offset_limit.q.out   |  8 +-
 .../offset_limit_ppd_optimizer.q.out            | 38 ++++----
 .../clientpositive/rand_partitionpruner2.q.out  |  2 +-
 .../results/clientpositive/smb_mapjoin_18.q.out | 29 ++++--
 .../results/clientpositive/spark/insert1.q.out  |  2 +-
 .../clientpositive/spark/load_dyn_part14.q.out  |  6 +-
 .../clientpositive/spark/smb_mapjoin_18.q.out   | 32 +++++--
 .../clientpositive/spark/smb_mapjoin_8.q.out    | 22 ++---
 .../clientpositive/spark/union_date_trim.q.out  |  2 +-
 .../clientpositive/stats_empty_partition.q.out  |  2 +-
 .../results/clientpositive/tez/empty_join.q.out | 75 +++++++++------
 .../tez/vector_when_case_null.q.out             |  8 +-
 .../clientpositive/udf_concat_insert2.q.out     |  2 +-
 .../clientpositive/union_date_trim.q.out        |  2 +-
 .../clientpositive/union_remove_26.q.out        | 54 +++++------
 .../clientpositive/vector_when_case_null.q.out  |  8 +-
 43 files changed, 410 insertions(+), 302 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index a11d1bc..2fb283d 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.conf;
 
+import com.google.common.base.Joiner;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
@@ -56,8 +57,6 @@ import org.apache.hive.common.HiveCompat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Joiner;
-
 /**
  * Hive Configuration.
  */
@@ -780,14 +779,21 @@ public class HiveConf extends Configuration {
         "hive.txn.valid.txns,hive.script.operator.env.blacklist",
         "Comma separated list of keys from the configuration file not to 
convert to environment " +
         "variables when envoking the script operator"),
-    HIVEMAPREDMODE("hive.mapred.mode", "strict",
-        "The mode in which the Hive operations are being performed. \n" +
-        "In strict mode, some risky queries are not allowed to run. They 
include:\n" +
-        "  Cartesian Product.\n" +
-        "  No partition being picked up for a query.\n" +
+    HIVE_STRICT_CHECKS_LARGE_QUERY("hive.strict.checks.large.query", false,
+        "Enabling strict large query checks disallows the following:\n" +
+        "  Orderby without limit.\n" +
+        "  No partition being picked up for a query against partitioned 
table.\n" +
+        "Note that these checks currently do not consider data size, only the 
query pattern."),
+    HIVE_STRICT_CHECKS_TYPE_SAFETY("hive.strict.checks.type.safety", true,
+        "Enabling strict type safety checks disallows the following:\n" +
         "  Comparing bigints and strings.\n" +
-        "  Comparing bigints and doubles.\n" +
-        "  Orderby without limit."),
+        "  Comparing bigints and doubles."),
+    HIVE_STRICT_CHECKS_CARTESIAN("hive.strict.checks.cartesian.product", true,
+        "Enabling strict large query checks disallows the following:\n" +
+        "  Cartesian product (cross join)."),
+    @Deprecated
+    HIVEMAPREDMODE("hive.mapred.mode", "nonstrict",
+        "Deprecated; use hive.strict.checks.* settings instead."),
     HIVEALIAS("hive.alias", "", ""),
     HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side 
aggregation in Hive Group By queries"),
     HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew 
in data to optimize group by queries"),
@@ -3696,6 +3702,47 @@ public class HiveConf extends Configuration {
     HiveConf.loadHiveServer2Config = loadHiveServer2Config;
   }
 
+  public static class StrictChecks {
+
+    private static final String NO_LIMIT_MSG = makeMessage(
+        "Order by-s without limit", ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY);
+    private static final String NO_PARTITIONLESS_MSG = makeMessage(
+        "Queries against partitioned tables without a partition filter",
+        ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY);
+    private static final String NO_COMPARES_MSG = makeMessage(
+        "Unsafe compares between different types", 
ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY);
+    private static final String NO_CARTESIAN_MSG = makeMessage(
+        "Cartesian products", ConfVars.HIVE_STRICT_CHECKS_CARTESIAN);
+
+    private static String makeMessage(String what, ConfVars setting) {
+      return what + " are disabled for safety reasons. If you know what you are doing, please make"
+          + " sure that " + setting.varname + " is set to false and that "
+          + ConfVars.HIVEMAPREDMODE.varname + " is not set to 'strict' to enable them.";
+    }
+
+    public static String checkNoLimit(Configuration conf) {
+      return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY) ? null : NO_LIMIT_MSG;
+    }
+
+    public static String checkNoPartitionFilter(Configuration conf) {
+      return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY)
+          ? null : NO_PARTITIONLESS_MSG;
+    }
+
+    public static String checkTypeSafety(Configuration conf) {
+      return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY) ? null : NO_COMPARES_MSG;
+    }
+
+    public static String checkCartesian(Configuration conf) {
+      return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_CARTESIAN) ? null : NO_CARTESIAN_MSG;
+    }
+
+    private static boolean isAllowed(Configuration conf, ConfVars setting) {
+      String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, null);
+      return (mode != null) ? !"strict".equals(mode) : !HiveConf.getBoolVar(conf, setting);
+    }
+  }
+
   public static String getNonMrEngines() {
     String result = "";
     for (String s : ConfVars.HIVE_EXECUTION_ENGINE.getValidStringValues()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index 5c0bf0d..a2e9480 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
 
+
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -43,7 +44,7 @@ import org.apache.calcite.util.Pair;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.conf.HiveConf.StrictChecks;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
@@ -120,17 +121,14 @@ public class HiveOpConverter {
   private final HiveConf                                      hiveConf;
   private final UnparseTranslator                             unparseTranslator;
   private final Map<String, Operator<? extends OperatorDesc>> topOps;
-  private final boolean                                       strictMode;
   private int                                                 uniqueCounter;
 
   public HiveOpConverter(SemanticAnalyzer semanticAnalyzer, HiveConf hiveConf,
-      UnparseTranslator unparseTranslator, Map<String, Operator<? extends OperatorDesc>> topOps,
-      boolean strictMode) {
+      UnparseTranslator unparseTranslator, Map<String, Operator<? extends OperatorDesc>> topOps) {
     this.semanticAnalyzer = semanticAnalyzer;
     this.hiveConf = hiveConf;
     this.unparseTranslator = unparseTranslator;
     this.topOps = topOps;
-    this.strictMode = strictMode;
     this.uniqueCounter = 0;
   }
 
@@ -424,10 +422,10 @@ public class HiveOpConverter {
     // of their columns
     if (sortRel.getCollation() != RelCollations.EMPTY) {
 
-      // In strict mode, in the presence of order by, limit must be
-      // specified
-      if (strictMode && sortRel.fetch == null) {
-        throw new SemanticException(ErrorMsg.NO_LIMIT_WITH_ORDERBY.getMsg());
+      // In strict mode, in the presence of order by, limit must be specified.
+      if (sortRel.fetch == null) {
+        String error = StrictChecks.checkNoLimit(hiveConf);
+        if (error != null) throw new SemanticException(error);
       }
 
       // 1.a. Extract order for each column from collation
@@ -476,7 +474,7 @@ public class HiveOpConverter {
       // 1.b. Generate reduce sink and project operator
       resultOp = genReduceSinkAndBacktrackSelect(resultOp,
           sortCols.toArray(new ExprNodeDesc[sortCols.size()]), 0, new ArrayList<ExprNodeDesc>(),
-          order.toString(), numReducers, Operation.NOT_ACID, strictMode, keepColumns);
+          order.toString(), numReducers, Operation.NOT_ACID, hiveConf, keepColumns);
     }
 
     // 2. If we need to generate limit
@@ -606,7 +604,7 @@ public class HiveOpConverter {
     exchangeRel.setJoinExpressions(expressions);
 
     ReduceSinkOperator rsOp = genReduceSink(inputOpAf.inputs.get(0), tabAlias, expressions,
-        -1, -1, Operation.NOT_ACID, strictMode);
+        -1, -1, Operation.NOT_ACID, hiveConf);
 
     return new OpAttr(tabAlias, inputOpAf.vcolsInCalcite, rsOp);
   }
@@ -654,7 +652,7 @@ public class HiveOpConverter {
 
       SelectOperator selectOp = genReduceSinkAndBacktrackSelect(input,
           keyCols.toArray(new ExprNodeDesc[keyCols.size()]), 0, partCols,
-          order.toString(), -1, Operation.NOT_ACID, strictMode);
+          order.toString(), -1, Operation.NOT_ACID, hiveConf);
 
       // 2. Finally create PTF
       PTFTranslator translator = new PTFTranslator();
@@ -679,14 +677,15 @@ public class HiveOpConverter {
 
   private static SelectOperator genReduceSinkAndBacktrackSelect(Operator<?> input,
           ExprNodeDesc[] keys, int tag, ArrayList<ExprNodeDesc> partitionCols, String order,
-          int numReducers, Operation acidOperation, boolean strictMode) throws SemanticException {
+          int numReducers, Operation acidOperation, HiveConf hiveConf)
+              throws SemanticException {
     return genReduceSinkAndBacktrackSelect(input, keys, tag, partitionCols, order,
-        numReducers, acidOperation, strictMode, input.getSchema().getColumnNames());
+        numReducers, acidOperation, hiveConf, input.getSchema().getColumnNames());
   }
 
   private static SelectOperator genReduceSinkAndBacktrackSelect(Operator<?> input,
       ExprNodeDesc[] keys, int tag, ArrayList<ExprNodeDesc> partitionCols, String order,
-      int numReducers, Operation acidOperation, boolean strictMode,
+      int numReducers, Operation acidOperation, HiveConf hiveConf,
       List<String> keepColNames) throws SemanticException {
     // 1. Generate RS operator
     // 1.1 Prune the tableNames, only count the tableNames that are not empty strings
@@ -716,7 +715,7 @@ public class HiveOpConverter {
           "In CBO return path, genReduceSinkAndBacktrackSelect is expecting 
only one tableAlias but there is none");
     }
     // 1.2 Now generate RS operator
-    ReduceSinkOperator rsOp = genReduceSink(input, tableAlias, keys, tag, partitionCols, order, numReducers, acidOperation, strictMode);
+    ReduceSinkOperator rsOp = genReduceSink(input, tableAlias, keys, tag, partitionCols, order, numReducers, acidOperation, hiveConf);
 
     // 2. Generate backtrack Select operator
     Map<String, ExprNodeDesc> descriptors = buildBacktrackFromReduceSink(keepColNames,
@@ -737,15 +736,15 @@ public class HiveOpConverter {
   }
 
   private static ReduceSinkOperator genReduceSink(Operator<?> input, String tableAlias, ExprNodeDesc[] keys, int tag,
-      int numReducers, Operation acidOperation, boolean strictMode) throws SemanticException {
+      int numReducers, Operation acidOperation, HiveConf hiveConf) throws SemanticException {
     return genReduceSink(input, tableAlias, keys, tag, new ArrayList<ExprNodeDesc>(), "", numReducers,
-        acidOperation, strictMode);
+        acidOperation, hiveConf);
   }
 
   @SuppressWarnings({ "rawtypes", "unchecked" })
   private static ReduceSinkOperator genReduceSink(Operator<?> input, String tableAlias, ExprNodeDesc[] keys, int tag,
       ArrayList<ExprNodeDesc> partitionCols, String order, int numReducers,
-      Operation acidOperation, boolean strictMode) throws SemanticException {
+      Operation acidOperation, HiveConf hiveConf) throws SemanticException {
     Operator dummy = Operator.createDummy(); // dummy for backtracking
     dummy.setParentOperators(Arrays.asList(input));
 
@@ -809,9 +808,8 @@ public class HiveOpConverter {
       numReducers = 1;
 
       // Cartesian product is not supported in strict mode
-      if (strictMode) {
-        throw new SemanticException(ErrorMsg.NO_CARTESIAN_PRODUCT.getMsg());
-      }
+      String error = StrictChecks.checkCartesian(hiveConf);
+      if (error != null) throw new SemanticException(error);
     }
 
     ReduceSinkDesc rsDesc;

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
index 90e9b3f..02c5a89 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
@@ -33,11 +33,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.StrictChecks;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
@@ -181,11 +181,13 @@ public class PartitionPruner extends Transform {
       return getAllPartsFromCacheOrServer(tab, key, false, prunedPartitionsMap);
     }
 
-    if ("strict".equalsIgnoreCase(HiveConf.getVar(conf, 
HiveConf.ConfVars.HIVEMAPREDMODE))
-        && !hasColumnExpr(prunerExpr)) {
+    if (!hasColumnExpr(prunerExpr)) {
       // If the "strict" mode is on, we have to provide partition pruner for 
each table.
-      throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE
-          .getMsg("for Alias \"" + alias + "\" Table \"" + tab.getTableName() 
+ "\""));
+      String error = StrictChecks.checkNoPartitionFilter(conf);
+      if (error != null) {
+        throw new SemanticException(error + " No partition predicate for Alias 
\""
+            + alias + "\" Table \"" + tab.getTableName() + "\"");
+      }
     }
 
     if (prunerExpr == null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 4ce9111..411c6d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -100,6 +100,7 @@ import org.apache.calcite.util.ImmutableIntList;
 import org.apache.calcite.util.Pair;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.conf.HiveConf.StrictChecks;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryProperties;
@@ -439,8 +440,9 @@ public class CalcitePlanner extends SemanticAnalyzer {
   static String canHandleQbForCbo(QueryProperties queryProperties, HiveConf conf,
       boolean topLevelQB, boolean verbose, QB qb) {
     boolean isInTest = conf.getBoolVar(ConfVars.HIVE_IN_TEST);
+    // TODO: HIVEMAPREDMODE is deprecated. Why does this test-only exception exist?
     boolean isStrictTest = isInTest
-        && !conf.getVar(ConfVars.HIVEMAPREDMODE).equalsIgnoreCase("nonstrict");
+        && "strict".equals(HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE));
 
     if (!isStrictTest
         && !queryProperties.hasClusterBy() && !queryProperties.hasDistributeBy()
@@ -454,7 +456,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
     String msg = "";
     if (verbose) {
       if (isStrictTest)
-        msg += "is in test running in mode other than nonstrict; ";
+        msg += "is in test running in strict mode (deprecated); ";
       if (queryProperties.hasClusterBy())
         msg += "has cluster by; ";
       if (queryProperties.hasDistributeBy())
@@ -684,8 +686,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
         optimizedOptiqPlan, resultSchema, this.getQB().getTableDesc() != null);
 
     LOG.debug("Translating the following plan:\n" + 
RelOptUtil.toString(modifiedOptimizedOptiqPlan));
-    Operator<?> hiveRoot = new HiveOpConverter(this, conf, unparseTranslator, 
topOps,
-        
conf.getVar(HiveConf.ConfVars.HIVEMAPREDMODE).equalsIgnoreCase("strict")).convert(modifiedOptimizedOptiqPlan);
+    Operator<?> hiveRoot = new HiveOpConverter(this, conf, unparseTranslator, 
topOps)
+                                  .convert(modifiedOptimizedOptiqPlan);
     RowResolver hiveRootRR = genRowResolver(hiveRoot, getQB());
     opParseCtx.put(hiveRoot, new OpParseContext(hiveRootRR));
     String dest = getQB().getParseInfo().getClauseNames().iterator().next();
@@ -2348,10 +2350,11 @@ public class CalcitePlanner extends SemanticAnalyzer {
         // 1. OB Expr sanity test
         // in strict mode, in the presence of order by, limit must be specified
         Integer limit = qb.getParseInfo().getDestLimit(dest);
-        if (conf.getVar(HiveConf.ConfVars.HIVEMAPREDMODE).equalsIgnoreCase("strict")
-            && limit == null) {
-          throw new SemanticException(SemanticAnalyzer.generateErrorMessage(obAST,
-              ErrorMsg.NO_LIMIT_WITH_ORDERBY.getMsg()));
+        if (limit == null) {
+          String error = StrictChecks.checkNoLimit(conf);
+          if (error != null) {
+            throw new SemanticException(SemanticAnalyzer.generateErrorMessage(obAST, error));
+          }
         }
 
         // 2. Walk through OB exprs and extract field collations and additional

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 6e59da7..883bd0a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.StatsSetupConst.StatDB;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.conf.HiveConf.StrictChecks;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -7120,12 +7121,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       if (sortExprs != null) {
         assert numReducers == 1;
         // in strict mode, in the presence of order by, limit must be specified
-        Integer limit = qb.getParseInfo().getDestLimit(dest);
-        if (conf.getVar(HiveConf.ConfVars.HIVEMAPREDMODE).equalsIgnoreCase(
-            "strict")
-            && limit == null) {
-          throw new SemanticException(generateErrorMessage(sortExprs,
-              ErrorMsg.NO_LIMIT_WITH_ORDERBY.getMsg()));
+        if (qb.getParseInfo().getDestLimit(dest) == null) {
+          String error = StrictChecks.checkNoLimit(conf);
+          if (error != null) {
+            throw new SemanticException(generateErrorMessage(sortExprs, error));
+          }
         }
       }
     }
@@ -7486,12 +7486,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     // Use only 1 reducer in case of cartesian product
     if (reduceKeys.size() == 0) {
       numReds = 1;
-
-      // Cartesian product is not supported in strict mode
-      if (conf.getVar(HiveConf.ConfVars.HIVEMAPREDMODE).equalsIgnoreCase(
-          "strict")) {
-        throw new SemanticException(ErrorMsg.NO_CARTESIAN_PRODUCT.getMsg());
-      }
+      String error = StrictChecks.checkCartesian(conf);
+      if (error != null) throw new SemanticException(error);
     }
 
     ReduceSinkDesc rsDesc = PlanUtils.getReduceSinkDesc(reduceKeys,

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java
index 25c2ff3..b7c1445 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java
@@ -27,8 +27,7 @@ import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.conf.HiveConf.StrictChecks;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -217,18 +216,14 @@ public class ExprNodeGenericFuncDesc extends ExprNodeDesc implements
       // For now, if a bigint is going to be cast to a double throw an error or warning
       if ((oiTypeInfo0.equals(TypeInfoFactory.stringTypeInfo) && oiTypeInfo1.equals(TypeInfoFactory.longTypeInfo)) ||
           (oiTypeInfo0.equals(TypeInfoFactory.longTypeInfo) && oiTypeInfo1.equals(TypeInfoFactory.stringTypeInfo))) {
-        if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE).equalsIgnoreCase("strict")) {
-          throw new UDFArgumentException(ErrorMsg.NO_COMPARE_BIGINT_STRING.getMsg());
-        } else {
-          console.printError("WARNING: Comparing a bigint and a string may result in a loss of precision.");
-        }
+        String error = StrictChecks.checkTypeSafety(conf);
+        if (error != null) throw new UDFArgumentException(error);
+        console.printError("WARNING: Comparing a bigint and a string may result in a loss of precision.");
       } else if ((oiTypeInfo0.equals(TypeInfoFactory.doubleTypeInfo) && oiTypeInfo1.equals(TypeInfoFactory.longTypeInfo)) ||
           (oiTypeInfo0.equals(TypeInfoFactory.longTypeInfo) && oiTypeInfo1.equals(TypeInfoFactory.doubleTypeInfo))) {
-        if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE).equalsIgnoreCase("strict")) {
-          throw new UDFArgumentException(ErrorMsg.NO_COMPARE_BIGINT_DOUBLE.getMsg());
-        } else {
-          console.printError("WARNING: Comparing a bigint and a double may result in a loss of precision.");
-        }
+        String error = StrictChecks.checkTypeSafety(conf);
+        if (error != null) throw new UDFArgumentException(error);
+        console.printError("WARNING: Comparing a bigint and a double may result in a loss of precision.");
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/queries/clientnegative/invalid_distinct1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/invalid_distinct1.q b/ql/src/test/queries/clientnegative/invalid_distinct1.q
index 538ef81..6f688e3 100644
--- a/ql/src/test/queries/clientnegative/invalid_distinct1.q
+++ b/ql/src/test/queries/clientnegative/invalid_distinct1.q
@@ -1 +1,2 @@
+set hive.cbo.enable=false;
 explain select hash(distinct value) from src;

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/queries/clientnegative/invalid_distinct3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/invalid_distinct3.q b/ql/src/test/queries/clientnegative/invalid_distinct3.q
index ec8026b..e05acca 100644
--- a/ql/src/test/queries/clientnegative/invalid_distinct3.q
+++ b/ql/src/test/queries/clientnegative/invalid_distinct3.q
@@ -1 +1,2 @@
+set hive.cbo.enable=false;
 explain select hash(upper(distinct value)) from src;

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientnegative/alter_view_failure6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/alter_view_failure6.q.out b/ql/src/test/results/clientnegative/alter_view_failure6.q.out
index 8726ca0..70d7622 100644
--- a/ql/src/test/results/clientnegative/alter_view_failure6.q.out
+++ b/ql/src/test/results/clientnegative/alter_view_failure6.q.out
@@ -18,5 +18,5 @@ POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@xxx7
-FAILED: SemanticException [Error 10041]: No partition predicate found for Alias "default.xxx7:srcpart" Table "srcpart"
+FAILED: SemanticException Queries against partitioned tables without a partition filter are disabled for safety reasons. If you know what you are doing, please make sure that hive.strict.checks.large.query is set to false and that hive.mapred.mode is not set to 'strict' to enable them. No partition predicate for Alias "default.xxx7:srcpart" Table "srcpart"
 FAILED: SemanticException [Error 10056]: The query does not reference any valid partition. To run this query, set hive.mapred.mode=nonstrict

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientnegative/compare_double_bigint.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/compare_double_bigint.q.out b/ql/src/test/results/clientnegative/compare_double_bigint.q.out
index 68a295a..10a5667 100644
--- a/ql/src/test/results/clientnegative/compare_double_bigint.q.out
+++ b/ql/src/test/results/clientnegative/compare_double_bigint.q.out
@@ -1 +1 @@
-FAILED: SemanticException Line 0:-1 Wrong arguments '1.0': In strict mode, comparing bigints and doubles is not allowed, it may result in a loss of precision. If you really want to perform the operation, set hive.mapred.mode=nonstrict
+FAILED: SemanticException Line 0:-1 Wrong arguments '1.0': Unsafe compares between different types are disabled for safety reasons. If you know what you are doing, please make sure that hive.strict.checks.type.safety is set to false and that hive.mapred.mode is not set to 'strict' to enable them.

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientnegative/compare_string_bigint.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/compare_string_bigint.q.out b/ql/src/test/results/clientnegative/compare_string_bigint.q.out
index 9a604fa..c8eb290 100644
--- a/ql/src/test/results/clientnegative/compare_string_bigint.q.out
+++ b/ql/src/test/results/clientnegative/compare_string_bigint.q.out
@@ -1 +1 @@
-FAILED: SemanticException Line 0:-1 Wrong arguments ''1'': In strict mode, comparing bigints and strings is not allowed, it may result in a loss of precision. If you really want to perform the operation, set hive.mapred.mode=nonstrict
+FAILED: SemanticException Line 0:-1 Wrong arguments ''1'': Unsafe compares between different types are disabled for safety reasons. If you know what you are doing, please make sure that hive.strict.checks.type.safety is set to false and that hive.mapred.mode is not set to 'strict' to enable them.

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientnegative/input4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/input4.q.out b/ql/src/test/results/clientnegative/input4.q.out
index eb17075..887865d 100644
--- a/ql/src/test/results/clientnegative/input4.q.out
+++ b/ql/src/test/results/clientnegative/input4.q.out
@@ -1 +1 @@
-FAILED: SemanticException [Error 10052]: In strict mode, cartesian product is not allowed. If you really want to perform the operation, set hive.mapred.mode=nonstrict
+FAILED: SemanticException Cartesian products are disabled for safety reasons. If you know what you are doing, please make sure that hive.strict.checks.cartesian.product is set to false and that hive.mapred.mode is not set to 'strict' to enable them.

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientnegative/input_part0_neg.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/input_part0_neg.q.out b/ql/src/test/results/clientnegative/input_part0_neg.q.out
index 4c717b7..3abe4b7 100644
--- a/ql/src/test/results/clientnegative/input_part0_neg.q.out
+++ b/ql/src/test/results/clientnegative/input_part0_neg.q.out
@@ -1 +1 @@
-FAILED: SemanticException [Error 10041]: No partition predicate found for Alias "x" Table "srcpart"
+FAILED: SemanticException Queries against partitioned tables without a partition filter are disabled for safety reasons. If you know what you are doing, please make sure that hive.strict.checks.large.query is set to false and that hive.mapred.mode is not set to 'strict' to enable them. No partition predicate for Alias "x" Table "srcpart"

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientnegative/invalid_distinct1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/invalid_distinct1.q.out b/ql/src/test/results/clientnegative/invalid_distinct1.q.out
index c1c95a9..c08b1c3 100644
--- a/ql/src/test/results/clientnegative/invalid_distinct1.q.out
+++ b/ql/src/test/results/clientnegative/invalid_distinct1.q.out
@@ -1 +1 @@
-FAILED: SemanticException 1:15 Distinct keyword is not support in current context. Error encountered near token 'value'
+FAILED: SemanticException 2:15 Distinct keyword is not support in current context. Error encountered near token 'value'

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientnegative/invalid_distinct3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/invalid_distinct3.q.out b/ql/src/test/results/clientnegative/invalid_distinct3.q.out
index e478860..444af62 100644
--- a/ql/src/test/results/clientnegative/invalid_distinct3.q.out
+++ b/ql/src/test/results/clientnegative/invalid_distinct3.q.out
@@ -1 +1 @@
-FAILED: SemanticException 1:20 Distinct keyword is not support in current context. Error encountered near token 'value'
+FAILED: SemanticException 2:20 Distinct keyword is not support in current context. Error encountered near token 'value'

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientnegative/strict_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/strict_join.q.out b/ql/src/test/results/clientnegative/strict_join.q.out
index eb17075..887865d 100644
--- a/ql/src/test/results/clientnegative/strict_join.q.out
+++ b/ql/src/test/results/clientnegative/strict_join.q.out
@@ -1 +1 @@
-FAILED: SemanticException [Error 10052]: In strict mode, cartesian product is not allowed. If you really want to perform the operation, set hive.mapred.mode=nonstrict
+FAILED: SemanticException Cartesian products are disabled for safety reasons. If you know what you are doing, please make sure that hive.strict.checks.cartesian.product is set to false and that hive.mapred.mode is not set to 'strict' to enable them.

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientnegative/strict_orderby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/strict_orderby.q.out b/ql/src/test/results/clientnegative/strict_orderby.q.out
index 82d1f9c..08c1062 100644
--- a/ql/src/test/results/clientnegative/strict_orderby.q.out
+++ b/ql/src/test/results/clientnegative/strict_orderby.q.out
@@ -1 +1 @@
-FAILED: SemanticException 4:47 In strict mode, if ORDER BY is specified, LIMIT must also be specified. Error encountered near token 'key'
+FAILED: SemanticException 4:47 Order by-s without limit are disabled for safety reasons. If you know what you are doing, please make sure that hive.strict.checks.large.query is set to false and that hive.mapred.mode is not set to 'strict' to enable them.. Error encountered near token 'key'

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientnegative/strict_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/strict_pruning.q.out b/ql/src/test/results/clientnegative/strict_pruning.q.out
index ff50b28..d45a2b8 100644
--- a/ql/src/test/results/clientnegative/strict_pruning.q.out
+++ b/ql/src/test/results/clientnegative/strict_pruning.q.out
@@ -1 +1 @@
-FAILED: SemanticException [Error 10041]: No partition predicate found for Alias "srcpart" Table "srcpart"
+FAILED: SemanticException Queries against partitioned tables without a partition filter are disabled for safety reasons. If you know what you are doing, please make sure that hive.strict.checks.large.query is set to false and that hive.mapred.mode is not set to 'strict' to enable them. No partition predicate for Alias "srcpart" Table "srcpart"

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientnegative/subquery_notexists_implicit_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/subquery_notexists_implicit_gby.q.out b/ql/src/test/results/clientnegative/subquery_notexists_implicit_gby.q.out
index 6d9fa0a..da38f5f 100644
--- a/ql/src/test/results/clientnegative/subquery_notexists_implicit_gby.q.out
+++ b/ql/src/test/results/clientnegative/subquery_notexists_implicit_gby.q.out
@@ -1 +1 @@
-FAILED: SemanticException [Error 10250]: Line 7:7 Invalid SubQuery expression 'key': A Not Exists predicate on SubQuery with implicit Aggregation(no Group By clause) cannot be rewritten. (predicate will always return false).
+FAILED: SemanticException [Error 10002]: Line 8:20 Invalid column reference 'value'

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/create_genericudf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_genericudf.q.out b/ql/src/test/results/clientpositive/create_genericudf.q.out
index 586f0ba..db3a9b5 100644
--- a/ql/src/test/results/clientpositive/create_genericudf.q.out
+++ b/ql/src/test/results/clientpositive/create_genericudf.q.out
@@ -50,13 +50,13 @@ SELECT
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
-POSTHOOK: Lineage: dest1.c2 EXPRESSION []
+POSTHOOK: Lineage: dest1.c1 SIMPLE []
+POSTHOOK: Lineage: dest1.c2 SIMPLE []
 POSTHOOK: Lineage: dest1.c3 EXPRESSION []
 POSTHOOK: Lineage: dest1.c4 EXPRESSION []
 POSTHOOK: Lineage: dest1.c5 EXPRESSION []
-POSTHOOK: Lineage: dest1.c6 EXPRESSION []
-POSTHOOK: Lineage: dest1.c7 EXPRESSION []
+POSTHOOK: Lineage: dest1.c6 SIMPLE []
+POSTHOOK: Lineage: dest1.c7 SIMPLE []
 PREHOOK: query: SELECT dest1.* FROM dest1 LIMIT 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/cte_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cte_2.q.out b/ql/src/test/results/clientpositive/cte_2.q.out
index a8bc760..d6923ba 100644
--- a/ql/src/test/results/clientpositive/cte_2.q.out
+++ b/ql/src/test/results/clientpositive/cte_2.q.out
@@ -40,7 +40,7 @@ select *
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@s1
-POSTHOOK: Lineage: s1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: s1.key SIMPLE []
 POSTHOOK: Lineage: s1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from s1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/empty_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/empty_join.q.out b/ql/src/test/results/clientpositive/empty_join.q.out
index 3f8aedf..99e8f94 100644
--- a/ql/src/test/results/clientpositive/empty_join.q.out
+++ b/ql/src/test/results/clientpositive/empty_join.q.out
@@ -59,31 +59,45 @@ STAGE PLANS:
   Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        t2 
+        $hdt$_1:t2 
           Fetch Operator
             limit: -1
-        t3 
+        $hdt$_2:t3 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        t2 
+        $hdt$_1:t2 
           TableScan
             alias: t2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column 
stats: NONE
-            HashTable Sink Operator
-              keys:
-                0 id (type: int)
-                1 id (type: int)
-                2 id (type: int)
-        t3 
+            Filter Operator
+              predicate: id is not null (type: boolean)
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column 
stats: NONE
+              Select Operator
+                expressions: id (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL 
Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 _col0 (type: int)
+                    1 _col0 (type: int)
+                    2 _col0 (type: int)
+        $hdt$_2:t3 
           TableScan
             alias: t3
             Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column 
stats: NONE
-            HashTable Sink Operator
-              keys:
-                0 id (type: int)
-                1 id (type: int)
-                2 id (type: int)
+            Filter Operator
+              predicate: id is not null (type: boolean)
+              Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE 
Column stats: NONE
+              Select Operator
+                expressions: id (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE 
Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 _col0 (type: int)
+                    1 _col0 (type: int)
+                    2 _col0 (type: int)
 
   Stage: Stage-4
     Map Reduce
@@ -91,27 +105,30 @@ STAGE PLANS:
           TableScan
             alias: t1
             Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column 
stats: NONE
-            Map Join Operator
-              condition map:
-                   Left Outer Join0 to 1
-                   Inner Join 0 to 2
-              keys:
-                0 id (type: int)
-                1 id (type: int)
-                2 id (type: int)
-              outputColumnNames: _col0, _col4, _col8
-              Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE 
Column stats: NONE
+            Filter Operator
+              predicate: id is not null (type: boolean)
+              Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE 
Column stats: NONE
               Select Operator
-                expressions: _col0 (type: int), _col4 (type: int), _col8 
(type: int)
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE 
Column stats: NONE
-                File Output Operator
-                  compressed: false
+                expressions: id (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE 
Column stats: NONE
+                Map Join Operator
+                  condition map:
+                       Left Outer Join0 to 1
+                       Inner Join 0 to 2
+                  keys:
+                    0 _col0 (type: int)
+                    1 _col0 (type: int)
+                    2 _col0 (type: int)
+                  outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE 
Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE 
Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
       Local Work:
         Map Reduce Local Work
 

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
index 8ca8866..4e2665f 100644
--- a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
+++ b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
@@ -194,11 +194,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: key
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
               Group By Operator
                 aggregations: max('pants'), max('pANTS')
-                keys: key (type: string)
+                keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/insert1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert1.q.out b/ql/src/test/results/clientpositive/insert1.q.out
index 49dd2d5..7a2c429 100644
--- a/ql/src/test/results/clientpositive/insert1.q.out
+++ b/ql/src/test/results/clientpositive/insert1.q.out
@@ -26,7 +26,7 @@ POSTHOOK: query: insert overwrite table insert1 select a.key, 
a.value from inser
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert2
 POSTHOOK: Output: default@insert1
-POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: insert1.key SIMPLE []
 POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: explain insert into table insert1 select a.key, a.value from insert2 a WHERE (a.key=-1)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/join_cond_pushdown_unqual5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual5.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual5.q.out
index ab333b0..aea9822 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual5.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual5.q.out
@@ -54,44 +54,48 @@ STAGE PLANS:
           TableScan
             alias: l
             Statistics: Num rows: 2 Data size: 52 Basic stats: COMPLETE Column 
stats: NONE
-            Reduce Output Operator
-              key expressions: lk1 (type: string), lk2 (type: string)
-              sort order: ++
-              Map-reduce partition columns: lk1 (type: string), lk2 (type: 
string)
+            Select Operator
+              expressions: index (type: int), la (type: int), lk1 (type: 
string), lk2 (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 2 Data size: 52 Basic stats: COMPLETE 
Column stats: NONE
-              value expressions: index (type: int), la (type: int)
+              Reduce Output Operator
+                key expressions: _col2 (type: string), _col3 (type: string)
+                sort order: ++
+                Map-reduce partition columns: _col2 (type: string), _col3 
(type: string)
+                Statistics: Num rows: 2 Data size: 52 Basic stats: COMPLETE 
Column stats: NONE
+                value expressions: _col0 (type: int), _col1 (type: int)
           TableScan
             alias: r
             Statistics: Num rows: 2 Data size: 47 Basic stats: COMPLETE Column 
stats: NONE
-            Reduce Output Operator
-              key expressions: rk1 (type: string), rk2 (type: string)
-              sort order: ++
-              Map-reduce partition columns: rk1 (type: string), rk2 (type: 
string)
+            Select Operator
+              expressions: ra (type: int), rk1 (type: string), rk2 (type: 
string)
+              outputColumnNames: _col0, _col1, _col2
               Statistics: Num rows: 2 Data size: 47 Basic stats: COMPLETE 
Column stats: NONE
-              value expressions: ra (type: int)
+              Reduce Output Operator
+                key expressions: _col1 (type: string), _col2 (type: string)
+                sort order: ++
+                Map-reduce partition columns: _col1 (type: string), _col2 
(type: string)
+                Statistics: Num rows: 2 Data size: 47 Basic stats: COMPLETE 
Column stats: NONE
+                value expressions: _col0 (type: int)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Left Outer Join0 to 1
           keys:
-            0 lk1 (type: string), lk2 (type: string)
-            1 rk1 (type: string), rk2 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8, _col9
+            0 _col2 (type: string), _col3 (type: string)
+            1 _col1 (type: string), _col2 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
           Statistics: Num rows: 2 Data size: 57 Basic stats: COMPLETE Column 
stats: NONE
           Filter Operator
-            predicate: (COALESCE(_col1,'EMPTY') = COALESCE(_col7,'EMPTY')) 
(type: boolean)
+            predicate: (COALESCE(_col1,'EMPTY') = COALESCE(_col4,'EMPTY')) 
(type: boolean)
             Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE Column 
stats: NONE
-            Select Operator
-              expressions: _col0 (type: int), _col1 (type: int), _col2 (type: 
string), _col3 (type: string), _col7 (type: int), _col8 (type: string), _col9 
(type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE 
Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE 
Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -123,18 +127,22 @@ STAGE PLANS:
   Stage: Stage-4
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        r 
+        $hdt$_1:r 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        r 
+        $hdt$_1:r 
           TableScan
             alias: r
             Statistics: Num rows: 2 Data size: 47 Basic stats: COMPLETE Column 
stats: NONE
-            HashTable Sink Operator
-              keys:
-                0 lk1 (type: string), lk2 (type: string)
-                1 rk1 (type: string), rk2 (type: string)
+            Select Operator
+              expressions: ra (type: int), rk1 (type: string), rk2 (type: 
string)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 2 Data size: 47 Basic stats: COMPLETE 
Column stats: NONE
+              HashTable Sink Operator
+                keys:
+                  0 _col2 (type: string), _col3 (type: string)
+                  1 _col1 (type: string), _col2 (type: string)
 
   Stage: Stage-3
     Map Reduce
@@ -142,20 +150,20 @@ STAGE PLANS:
           TableScan
             alias: l
             Statistics: Num rows: 2 Data size: 52 Basic stats: COMPLETE Column 
stats: NONE
-            Map Join Operator
-              condition map:
-                   Left Outer Join0 to 1
-              keys:
-                0 lk1 (type: string), lk2 (type: string)
-                1 rk1 (type: string), rk2 (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8, 
_col9
-              Statistics: Num rows: 2 Data size: 57 Basic stats: COMPLETE 
Column stats: NONE
-              Filter Operator
-                predicate: (COALESCE(_col1,'EMPTY') = COALESCE(_col7,'EMPTY')) 
(type: boolean)
-                Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE 
Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), _col1 (type: int), _col2 
(type: string), _col3 (type: string), _col7 (type: int), _col8 (type: string), 
_col9 (type: string)
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
+            Select Operator
+              expressions: index (type: int), la (type: int), lk1 (type: 
string), lk2 (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 2 Data size: 52 Basic stats: COMPLETE 
Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Outer Join0 to 1
+                keys:
+                  0 _col2 (type: string), _col3 (type: string)
+                  1 _col1 (type: string), _col2 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
+                Statistics: Num rows: 2 Data size: 57 Basic stats: COMPLETE 
Column stats: NONE
+                Filter Operator
+                  predicate: (COALESCE(_col1,'EMPTY') = 
COALESCE(_col4,'EMPTY')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 28 Basic stats: COMPLETE 
Column stats: NONE
                   File Output Operator
                     compressed: false

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/literal_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/literal_decimal.q.out b/ql/src/test/results/clientpositive/literal_decimal.q.out
index eddc1a4..0b6299b 100644
--- a/ql/src/test/results/clientpositive/literal_decimal.q.out
+++ b/ql/src/test/results/clientpositive/literal_decimal.q.out
@@ -14,12 +14,12 @@ STAGE PLANS:
           alias: src
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: COMPLETE
           Select Operator
-            expressions: -1 (type: decimal(1,0)), 0 (type: decimal(1,0)), 1 
(type: decimal(1,0)), 3.14 (type: decimal(3,2)), -3.14 (type: decimal(3,2)), 
99999999999999999 (type: decimal(17,0)), 99999999999999999.9999999999999 (type: 
decimal(30,13)), null (type: decimal(1,0))
+            expressions: -1 (type: int), 0 (type: int), 1 (type: int), 3.14 
(type: decimal(3,2)), -3.14 (type: decimal(3,2)), 99999999999999999 (type: 
bigint), 99999999999999999.9999999999999 (type: decimal(30,13)), null (type: 
void)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7
-            Statistics: Num rows: 500 Data size: 392000 Basic stats: COMPLETE 
Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 178000 Basic stats: COMPLETE 
Column stats: COMPLETE
             Limit
               Number of rows: 1
-              Statistics: Num rows: 1 Data size: 784 Basic stats: COMPLETE 
Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE 
Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 
99999999999999999.9999999999999BD, 1E99BD FROM src LIMIT 1

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/load_dyn_part14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part14.q.out 
b/ql/src/test/results/clientpositive/load_dyn_part14.q.out
index b35cfaf..53e9df3 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part14.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part14.q.out
@@ -74,13 +74,13 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 500 Data size: 43000 Basic stats: COMPLETE 
Column stats: COMPLETE
+              Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL 
Column stats: COMPLETE
               Limit
                 Number of rows: 2
-                Statistics: Num rows: 2 Data size: 172 Basic stats: COMPLETE 
Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL 
Column stats: COMPLETE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 2 Data size: 172 Basic stats: COMPLETE 
Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL 
Column stats: COMPLETE
                   TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit
@@ -195,13 +195,13 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 500 Data size: 85000 Basic stats: COMPLETE 
Column stats: COMPLETE
+              Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE 
Column stats: COMPLETE
               Limit
                 Number of rows: 2
-                Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE 
Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE 
Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
                   TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit
@@ -225,13 +225,13 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 500 Data size: 85500 Basic stats: COMPLETE 
Column stats: COMPLETE
+              Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE 
Column stats: COMPLETE
               Limit
                 Number of rows: 2
-                Statistics: Num rows: 2 Data size: 342 Basic stats: COMPLETE 
Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 2 Data size: 342 Basic stats: COMPLETE 
Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
                   TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Limit

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/offset_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/offset_limit.q.out 
b/ql/src/test/results/clientpositive/offset_limit.q.out
index 2092c1d..0838a68 100644
--- a/ql/src/test/results/clientpositive/offset_limit.q.out
+++ b/ql/src/test/results/clientpositive/offset_limit.q.out
@@ -17,12 +17,12 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
             Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: key, value
+              expressions: key (type: string), substr(value, 5) (type: string)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
               Group By Operator
-                aggregations: sum(substr(value, 5))
-                keys: key (type: string)
+                aggregations: sum(_col1)
+                keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/offset_limit_ppd_optimizer.q.out
----------------------------------------------------------------------
diff --git 
a/ql/src/test/results/clientpositive/offset_limit_ppd_optimizer.q.out 
b/ql/src/test/results/clientpositive/offset_limit_ppd_optimizer.q.out
index facb26c..dcdcea5 100644
--- a/ql/src/test/results/clientpositive/offset_limit_ppd_optimizer.q.out
+++ b/ql/src/test/results/clientpositive/offset_limit_ppd_optimizer.q.out
@@ -172,12 +172,12 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
             Select Operator
-              expressions: value (type: string), key (type: string)
-              outputColumnNames: value, key
+              expressions: value (type: string), (UDFToDouble(key) + 1.0) 
(type: double)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
               Group By Operator
-                aggregations: sum((key + 1))
-                keys: value (type: string)
+                aggregations: sum(_col1)
+                keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
@@ -261,12 +261,12 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
             Select Operator
-              expressions: value (type: string), key (type: string)
-              outputColumnNames: value, key
+              expressions: value (type: string), (UDFToDouble(key) + 1.0) 
(type: double)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
               Group By Operator
-                aggregations: avg((key + 1))
-                keys: value (type: string)
+                aggregations: avg(_col1)
+                keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
@@ -1050,15 +1050,15 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
             Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: key, value
+              expressions: concat(key, value, value, value, value, value, 
value, value, value, value) (type: string), key (type: string)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
               Reduce Output Operator
-                key expressions: concat(key, value, value, value, value, 
value, value, value, value, value) (type: string)
+                key expressions: _col0 (type: string)
                 sort order: +
-                Map-reduce partition columns: concat(key, value, value, value, 
value, value, value, value, value, value) (type: string)
+                Map-reduce partition columns: _col0 (type: string)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-                value expressions: key (type: string)
+                value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Group By Operator
           aggregations: sum(VALUE._col0)
@@ -1219,12 +1219,12 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: key
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
               Reduce Output Operator
-                key expressions: key (type: string)
+                key expressions: _col0 (type: string)
                 sort order: +
-                Map-reduce partition columns: key (type: string)
+                Map-reduce partition columns: _col0 (type: string)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
                 TopN Hash Memory Usage: 2.0E-5
       Reduce Operator Tree:
@@ -1294,12 +1294,12 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: key
+              outputColumnNames: _col0
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
               Reduce Output Operator
-                key expressions: key (type: string)
+                key expressions: _col0 (type: string)
                 sort order: +
-                Map-reduce partition columns: key (type: string)
+                Map-reduce partition columns: _col0 (type: string)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
                 TopN Hash Memory Usage: 2.0E-5
       Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out 
b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
index 41b6fa2..4559b39 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
@@ -396,7 +396,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Output: default@tmptable
 POSTHOOK: Lineage: tmptable.ds SIMPLE [(srcpart)a.FieldSchema(name:hr, 
type:string, comment:null), ]
-POSTHOOK: Lineage: tmptable.hr SIMPLE [(srcpart)a.FieldSchema(name:ds, 
type:string, comment:null), ]
+POSTHOOK: Lineage: tmptable.hr SIMPLE []
 POSTHOOK: Lineage: tmptable.key SIMPLE [(srcpart)a.FieldSchema(name:key, 
type:string, comment:default), ]
 POSTHOOK: Lineage: tmptable.value SIMPLE [(srcpart)a.FieldSchema(name:value, 
type:string, comment:default), ]
 PREHOOK: query: select * from tmptable x sort by x.key,x.value,x.ds,x.hr

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out 
b/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out
index 6106188..4b29056 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_18.q.out
@@ -238,17 +238,28 @@ STAGE PLANS:
               predicate: (key = 238) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
               Select Operator
-                expressions: key (type: int), value (type: string)
+                expressions: 238 (type: int), value (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.test_table2
+                  value expressions: _col1 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: 
string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.test_table2
 
   Stage: Stage-0
     Move Operator
@@ -277,7 +288,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test_table1
 POSTHOOK: Input: default@test_table1@ds=1
 POSTHOOK: Output: default@test_table2@ds=2
-POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE 
[(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE []
 POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE 
[(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(*) from test_table2 where ds = '2'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/spark/insert1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/insert1.q.out 
b/ql/src/test/results/clientpositive/spark/insert1.q.out
index e72ba16..50e8376 100644
--- a/ql/src/test/results/clientpositive/spark/insert1.q.out
+++ b/ql/src/test/results/clientpositive/spark/insert1.q.out
@@ -26,7 +26,7 @@ POSTHOOK: query: insert overwrite table insert1 select a.key, 
a.value from inser
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert2
 POSTHOOK: Output: default@insert1
-POSTHOOK: Lineage: insert1.key SIMPLE [(insert2)a.FieldSchema(name:key, 
type:int, comment:null), ]
+POSTHOOK: Lineage: insert1.key SIMPLE []
 POSTHOOK: Lineage: insert1.value SIMPLE [(insert2)a.FieldSchema(name:value, 
type:string, comment:null), ]
 PREHOOK: query: explain insert into table insert1 select a.key, a.value from 
insert2 a WHERE (a.key=-1)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out 
b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
index 05ccf8b..84d99c3 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out
@@ -73,13 +73,13 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: COMPLETE
                   Select Operator
-                    Statistics: Num rows: 500 Data size: 43000 Basic stats: 
COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 500 Data size: 0 Basic stats: 
PARTIAL Column stats: COMPLETE
                     Limit
                       Number of rows: 2
-                      Statistics: Num rows: 2 Data size: 172 Basic stats: 
COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2 Data size: 0 Basic stats: 
PARTIAL Column stats: COMPLETE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 2 Data size: 172 Basic stats: 
COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 2 Data size: 0 Basic stats: 
PARTIAL Column stats: COMPLETE
                         TopN Hash Memory Usage: 0.1
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out 
b/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out
index 3076e06..bfdd529 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out
@@ -233,6 +233,8 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -244,17 +246,29 @@ STAGE PLANS:
                     predicate: (key = 238) (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: key (type: int), value (type: string)
+                      expressions: 238 (type: int), value (type: string)
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
-                      File Output Operator
-                        compressed: false
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
-                        table:
-                            input format: 
org.apache.hadoop.mapred.TextInputFormat
-                            output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            name: default.test_table2
+                        value expressions: _col1 (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 
(type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.test_table2
 
   Stage: Stage-0
     Move Operator
@@ -283,7 +297,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test_table1
 POSTHOOK: Input: default@test_table1@ds=1
 POSTHOOK: Output: default@test_table2@ds=2
-POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE 
[(test_table1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key SIMPLE []
 POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE 
[(test_table1)a.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(*) from test_table2 where ds = '2'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out 
b/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out
index babaff8..17821a8 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out
@@ -150,7 +150,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 
select * from smb_bucket_i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_input
 POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE []
 POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: insert overwrite table smb_bucket4_2 select * from 
smb_bucket_input where key=5000
 PREHOOK: type: QUERY
@@ -160,7 +160,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 
select * from smb_bucket_i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_input
 POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE []
 POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select /*+mapjoin(a)*/ * from smb_bucket4_1 a full outer join 
smb_bucket4_2 b on a.key = b.key
 PREHOOK: type: QUERY
@@ -335,7 +335,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 
select * from smb_bucket_i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_input
 POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE []
 POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: insert overwrite table smb_bucket4_2 select * from 
smb_bucket_input where key=5000
 PREHOOK: type: QUERY
@@ -345,7 +345,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 
select * from smb_bucket_i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_input
 POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE []
 POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: insert overwrite table smb_bucket4_3 select * from 
smb_bucket_input where key=4000
 PREHOOK: type: QUERY
@@ -355,7 +355,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_3 
select * from smb_bucket_i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_input
 POSTHOOK: Output: default@smb_bucket4_3
-POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE []
 POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer 
join smb_bucket4_2 b on a.key = b.key
 full outer join smb_bucket4_3 c on a.key=c.key
@@ -381,7 +381,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 
select * from smb_bucket_i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_input
 POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE []
 POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: insert overwrite table smb_bucket4_2 select * from 
smb_bucket_input where key=4000
 PREHOOK: type: QUERY
@@ -391,7 +391,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 
select * from smb_bucket_i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_input
 POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE []
 POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: insert overwrite table smb_bucket4_3 select * from 
smb_bucket_input where key=5000
 PREHOOK: type: QUERY
@@ -401,7 +401,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_3 
select * from smb_bucket_i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_input
 POSTHOOK: Output: default@smb_bucket4_3
-POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE []
 POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer 
join smb_bucket4_2 b on a.key = b.key
 full outer join smb_bucket4_3 c on a.key=c.key
@@ -427,7 +427,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_1 
select * from smb_bucket_i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_input
 POSTHOOK: Output: default@smb_bucket4_1
-POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_1.key SIMPLE []
 POSTHOOK: Lineage: smb_bucket4_1.value SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: insert overwrite table smb_bucket4_2 select * from 
smb_bucket_input where key=4000
 PREHOOK: type: QUERY
@@ -437,7 +437,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_2 
select * from smb_bucket_i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_input
 POSTHOOK: Output: default@smb_bucket4_2
-POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_2.key SIMPLE []
 POSTHOOK: Lineage: smb_bucket4_2.value SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: insert overwrite table smb_bucket4_3 select * from 
smb_bucket_input where key=5000
 PREHOOK: type: QUERY
@@ -447,7 +447,7 @@ POSTHOOK: query: insert overwrite table smb_bucket4_3 
select * from smb_bucket_i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@smb_bucket_input
 POSTHOOK: Output: default@smb_bucket4_3
-POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:key, type:int, 
comment:null), ]
+POSTHOOK: Lineage: smb_bucket4_3.key SIMPLE []
 POSTHOOK: Lineage: smb_bucket4_3.value SIMPLE 
[(smb_bucket_input)smb_bucket_input.FieldSchema(name:value, type:string, 
comment:null), ]
 PREHOOK: query: select /*+mapjoin(b,c)*/ * from smb_bucket4_1 a full outer 
join smb_bucket4_2 b on a.key = b.key
 full outer join smb_bucket4_3 c on a.key=c.key

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/spark/union_date_trim.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_date_trim.q.out 
b/ql/src/test/results/clientpositive/spark/union_date_trim.q.out
index e2f5269..324e8b7 100644
--- a/ql/src/test/results/clientpositive/spark/union_date_trim.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_date_trim.q.out
@@ -51,4 +51,4 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@testdate
 POSTHOOK: Output: default@testdate
 POSTHOOK: Lineage: testdate.dt EXPRESSION 
[(testdate)testdate.FieldSchema(name:dt, type:date, comment:null), ]
-POSTHOOK: Lineage: testdate.id EXPRESSION 
[(testdate)testdate.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: testdate.id EXPRESSION []

http://git-wip-us.apache.org/repos/asf/hive/blob/27800976/ql/src/test/results/clientpositive/stats_empty_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_empty_partition.q.out 
b/ql/src/test/results/clientpositive/stats_empty_partition.q.out
index 0ad031c..d7f2e73 100644
--- a/ql/src/test/results/clientpositive/stats_empty_partition.q.out
+++ b/ql/src/test/results/clientpositive/stats_empty_partition.q.out
@@ -20,7 +20,7 @@ POSTHOOK: query: insert overwrite table tmptable partition 
(part = '1') select *
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@tmptable@part=1
-POSTHOOK: Lineage: tmptable PARTITION(part=1).key SIMPLE 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmptable PARTITION(part=1).key SIMPLE []
 POSTHOOK: Lineage: tmptable PARTITION(part=1).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: describe formatted tmptable partition (part = '1')
 PREHOOK: type: DESCTABLE
