hive git commit: HIVE-13290: Support primary keys/foreign keys constraint as part of create table command in Hive (Hari Subramaniyan, reviewed by Ashutosh Chauhan)

2016-04-25 Thread harisankar
Repository: hive
Updated Branches:
  refs/heads/master caa3ec761 -> 53249a357


HIVE-13290: Support primary keys/foreign keys constraint as part of create table command in Hive (Hari Subramaniyan, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/53249a35
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/53249a35
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/53249a35

Branch: refs/heads/master
Commit: 53249a3579dce000736a05348b64faed32fb610c
Parents: caa3ec7
Author: Hari Subramaniyan 
Authored: Mon Apr 25 13:57:17 2016 -0700
Committer: Hari Subramaniyan 
Committed: Mon Apr 25 13:57:17 2016 -0700

--
 .../hadoop/hive/metastore/ObjectStore.java  |  41 +++-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java |   3 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  11 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java|  24 ++-
 .../hive/ql/parse/BaseSemanticAnalyzer.java | 209 +--
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |   9 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g|  92 +++-
 .../hadoop/hive/ql/parse/IdentifiersParser.g|  11 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |   2 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  10 +-
 .../hadoop/hive/ql/plan/CreateTableDesc.java|  43 +++-
 .../hive/ql/parse/TestHiveDecimalParse.java |   2 +-
 .../create_with_constraints_duplicate_name.q|   2 +
 .../create_with_constraints_enable.q|   1 +
 .../create_with_constraints_validate.q  |   1 +
 .../clientpositive/create_with_constraints.q|  12 ++
 ...create_with_constraints_duplicate_name.q.out |  13 ++
 .../create_with_constraints_enable.q.out|   1 +
 .../create_with_constraints_validate.q.out  |   1 +
 .../create_with_constraints.q.out   |  68 ++
 20 files changed, 520 insertions(+), 36 deletions(-)
--
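
For context, the DDL shape this patch enables looks roughly like the JDBC sketch below. The table and constraint names, the jdbc:hive2 URL, and the use of DISABLE NOVALIDATE are illustrative assumptions (the new negative tests above suggest ENABLE/VALIDATE are rejected at this point); the exact grammar is defined by the HiveParser.g changes in this commit.

// Hypothetical usage over JDBC; table and constraint names are made up.
// DISABLE NOVALIDATE records the constraint in the metastore without enforcing it on writes.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ConstraintDdlSketch {
  public static void main(String[] args) throws Exception {
    try (Connection con = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement stmt = con.createStatement()) {
      // Primary key declared inline with the table definition.
      stmt.execute("CREATE TABLE pk_tbl (id INT, name STRING, "
          + "CONSTRAINT pk1 PRIMARY KEY (id) DISABLE NOVALIDATE)");
      // Foreign key referencing the parent table's key.
      stmt.execute("CREATE TABLE fk_tbl (id INT, pk_id INT, "
          + "CONSTRAINT fk1 FOREIGN KEY (pk_id) REFERENCES pk_tbl (id) DISABLE NOVALIDATE)");
    }
  }
}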


http://git-wip-us.apache.org/repos/asf/hive/blob/53249a35/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
--
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index ae6f084..24fbf70 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1022,6 +1022,11 @@ public class ObjectStore implements RawStore, Configurable {
   " table " + tableName + " record to delete");
 }
 
+    List<MConstraint> tabConstraints = listAllTableConstraints(dbName, tableName);
+    if (tabConstraints != null && tabConstraints.size() > 0) {
+      pm.deletePersistentAll(tabConstraints);
+    }
+
 preDropStorageDescriptor(tbl.getSd());
 // then remove the table
 pm.deletePersistentAll(tbl);
@@ -1035,7 +1040,41 @@ public class ObjectStore implements RawStore, Configurable {
 return success;
   }
 
-  @Override
+  private List<MConstraint> listAllTableConstraints(String dbName, String tableName) {
+    List<MConstraint> mConstraints = null;
+    List<String> constraintNames = new ArrayList<String>();
+    Query query = null;
+
+    try {
+      query = pm.newQuery("select constraintName from org.apache.hadoop.hive.metastore.model.MConstraint  where "
+        + "(parentTable.tableName == ptblname && parentTable.database.name == pdbname) || "
+        + "(childTable != null && childTable.tableName == ctblname && childTable.database.name == cdbname)");
+      query.declareParameters("java.lang.String ptblname, java.lang.String pdbname,"
+          + "java.lang.String ctblname, java.lang.String cdbname");
+      Collection<?> constraintNamesColl = (Collection<?>) query.
+        executeWithArray(tableName, dbName, tableName, dbName);
+      for (Iterator<?> i = constraintNamesColl.iterator(); i.hasNext();) {
+        String currName = (String) i.next();
+        constraintNames.add(currName);
+      }
+      query = pm.newQuery(MConstraint.class);
+      query.setFilter("param.contains(constraintName)");
+      query.declareParameters("java.util.Collection param");
+      Collection<?> constraints = (Collection<?>) query.execute(constraintNames);
+      mConstraints = new ArrayList<MConstraint>();
+      for (Iterator<?> i = constraints.iterator(); i.hasNext();) {
+        MConstraint currConstraint = (MConstraint) i.next();
+        mConstraints.add(currConstraint);
+      }
+    } finally {
+      if (query != null) {
+        query.closeAll();
+      }
+    }
+    return mConstraints;
+  }
+
+  @Override
   public Table getTable(String dbName, String tableName) throws MetaException {
 boolean commited = false;
 Table tbl = null;


[2/2] hive git commit: HIVE-13424: Refactoring the code to pass a QueryState object rather than HiveConf object (Reviewed by Sergey Shelukhin)

2016-04-25 Thread aihuaxu
HIVE-13424: Refactoring the code to pass a QueryState object rather than HiveConf object (Reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/caa3ec76
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/caa3ec76
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/caa3ec76

Branch: refs/heads/master
Commit: caa3ec761c18d822259116fde9ff8a4f181df179
Parents: 86bdcbc
Author: Aihua Xu 
Authored: Tue Mar 15 13:12:57 2016 -0400
Committer: Aihua Xu 
Committed: Mon Apr 25 16:30:09 2016 -0400

--
 .../org/apache/hadoop/hive/cli/CliDriver.java   |   5 -
 .../mapreduce/TestHCatMultiOutputFormat.java|   7 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java|   9 +-
 .../java/org/apache/hadoop/hive/ql/Driver.java  |  93 ---
 .../org/apache/hadoop/hive/ql/QueryState.java   | 114 +++
 .../hadoop/hive/ql/exec/ColumnStatsTask.java|   5 +-
 .../hive/ql/exec/ColumnStatsUpdateTask.java |   6 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   9 +-
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |   4 +-
 .../apache/hadoop/hive/ql/exec/FetchTask.java   |   5 +-
 .../hadoop/hive/ql/exec/FunctionTask.java   |   6 +-
 .../hadoop/hive/ql/exec/StatsNoJobTask.java |   5 +-
 .../org/apache/hadoop/hive/ql/exec/Task.java|   6 +-
 .../hadoop/hive/ql/exec/mr/ExecDriver.java  |  11 +-
 .../hive/ql/exec/mr/HadoopJobExecHelper.java|  21 ++--
 .../hadoop/hive/ql/exec/mr/MapredLocalTask.java |   7 +-
 .../hadoop/hive/ql/exec/spark/SparkTask.java|   7 +-
 .../hadoop/hive/ql/history/HiveHistoryImpl.java |   6 +-
 .../apache/hadoop/hive/ql/hooks/ATSHook.java|   4 +-
 .../hadoop/hive/ql/hooks/HookContext.java   |  11 +-
 .../hive/ql/hooks/PostExecutePrinter.java   |  12 +-
 .../hadoop/hive/ql/hooks/PreExecutePrinter.java |  14 ++-
 .../hive/ql/index/TableBasedIndexHandler.java   |   2 -
 .../hadoop/hive/ql/io/merge/MergeFileTask.java  |   7 +-
 .../ql/io/rcfile/stats/PartialScanTask.java |  12 +-
 .../io/rcfile/truncate/ColumnTruncateTask.java  |   7 +-
 .../hive/ql/optimizer/GenMRTableScan1.java  |   2 +-
 .../index/RewriteParseContextGenerator.java |   7 +-
 .../RewriteQueryUsingAggregateIndexCtx.java |   2 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java |  14 ++-
 .../hadoop/hive/ql/parse/CalcitePlanner.java|  11 +-
 .../ql/parse/ColumnStatsSemanticAnalyzer.java   |   7 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java  |  13 ++-
 .../parse/ExplainSQRewriteSemanticAnalyzer.java |   8 +-
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |   7 +-
 .../hive/ql/parse/ExportSemanticAnalyzer.java   |   5 +-
 .../hive/ql/parse/FunctionSemanticAnalyzer.java |   5 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |   5 +-
 .../hive/ql/parse/LoadSemanticAnalyzer.java |   5 +-
 .../hive/ql/parse/MacroSemanticAnalyzer.java|  12 +-
 .../hadoop/hive/ql/parse/ParseContext.java  |  14 ++-
 .../hive/ql/parse/ProcessAnalyzeTable.java  |   2 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  35 +++---
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |  57 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java  |   9 +-
 .../hadoop/hive/ql/parse/TezCompiler.java   |   5 +-
 .../ql/parse/UpdateDeleteSemanticAnalyzer.java  |   5 +-
 .../parse/spark/SparkProcessAnalyzeTable.java   |   2 +-
 .../hadoop/hive/ql/session/SessionState.java| 106 ++---
 .../hadoop/hive/ql/exec/TestExecDriver.java |   7 +-
 .../ql/parse/TestMacroSemanticAnalyzer.java |   7 +-
 .../hadoop/hive/ql/parse/TestQBCompact.java |   8 +-
 .../ql/parse/TestQBJoinTreeApplyPredicate.java  |   7 +-
 .../hadoop/hive/ql/parse/TestQBSubQuery.java|   7 +-
 .../ql/parse/TestSemanticAnalyzerFactory.java   |   9 +-
 .../parse/TestUpdateDeleteSemanticAnalyzer.java |   9 +-
 .../authorization/AuthorizationTestUtil.java|  14 +--
 .../parse/authorization/PrivilegesTestBase.java |   5 +-
 .../TestHiveAuthorizationTaskFactory.java   |  12 +-
 .../parse/authorization/TestPrivilegesV1.java   |   9 +-
 .../parse/authorization/TestPrivilegesV2.java   |   8 +-
 .../hive/service/cli/operation/Operation.java   |  19 ++--
 .../service/cli/operation/SQLOperation.java |  49 ++--
 .../cli/operation/SQLOperationDisplay.java  |   2 +-
 .../service/cli/session/HiveSessionImpl.java|  40 +++
 65 files changed, 508 insertions(+), 437 deletions(-)
--
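
At a glance, the refactoring replaces the HiveConf handed to analyzers, tasks, and hooks with a per-query QueryState that also carries the current HiveOperation, so call sites stop reaching into the thread-local SessionState. A minimal sketch of the new calling convention follows; the QueryState constructor is assumed (it is not shown in the excerpts below), while the SemanticAnalyzer constructor and the getHiveOperation/setCommandType usages are visible in the diffs.

// Illustrative only: the QueryState constructor is an assumption; the rest mirrors the diff.
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class QueryStateSketch {
  static BaseSemanticAnalyzer newAnalyzer(HiveConf conf) throws SemanticException {
    // Before: new SemanticAnalyzer(conf); per-query state lived in the thread-local SessionState.
    // After: a QueryState wraps the conf plus per-query fields (e.g. the HiveOperation)
    // and is passed explicitly down to analyzers, tasks, and hooks.
    QueryState queryState = new QueryState(conf);   // constructor assumed
    return new SemanticAnalyzer(queryState);        // was: new SemanticAnalyzer(conf)
  }
}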


http://git-wip-us.apache.org/repos/asf/hive/blob/caa3ec76/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
--
diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
index 

[1/2] hive git commit: HIVE-13424: Refactoring the code to pass a QueryState object rather than HiveConf object (Reviewed by Sergey Shelukhin)

2016-04-25 Thread aihuaxu
Repository: hive
Updated Branches:
  refs/heads/master 86bdcbcd3 -> caa3ec761


http://git-wip-us.apache.org/repos/asf/hive/blob/caa3ec76/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
index 93b7a66..c13a404 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ProcessAnalyzeTable.java
@@ -190,7 +190,7 @@ public class ProcessAnalyzeTable implements NodeProcessor {
 // partial scan task
 DriverContext driverCxt = new DriverContext();
    Task partialScanTask = TaskFactory.get(scanWork, parseContext.getConf());
-partialScanTask.initialize(parseContext.getConf(), null, driverCxt,
+partialScanTask.initialize(parseContext.getQueryState(), null, driverCxt,
 tableScan.getCompilationOpContext());
 partialScanTask.setWork(scanWork);
 statsWork.setSourceTask(partialScanTask);

http://git-wip-us.apache.org/repos/asf/hive/blob/caa3ec76/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 9af7749..11fd2c7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryProperties;
+import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.ArchiveUtils;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -340,8 +341,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 int nextNum;
   }
 
-  public SemanticAnalyzer(HiveConf conf) throws SemanticException {
-super(conf);
+  public SemanticAnalyzer(QueryState queryState) throws SemanticException {
+super(queryState);
 opToPartPruner = new HashMap();
 opToPartList = new HashMap();
 opToSamplePruner = new HashMap();
@@ -442,7 +443,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   public ParseContext getParseContext() {
 // Make sure the basic query properties are initialized
 copyInfoToQueryProperties(queryProperties);
-return new ParseContext(conf, opToPartPruner, opToPartList, topOps,
+return new ParseContext(queryState, opToPartPruner, opToPartList, topOps,
 new HashSet(joinContext.keySet()),
 new HashSet(smbMapJoinContext.keySet()),
 loadTableWork, loadFileWork, ctx, idToTableNameMap, destTableId, uCtx,
@@ -1197,18 +1198,18 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 createTable.addChild(temporary);
 createTable.addChild(cte.cteNode);
 
-SemanticAnalyzer analyzer = new SemanticAnalyzer(conf);
+SemanticAnalyzer analyzer = new SemanticAnalyzer(queryState);
 analyzer.initCtx(ctx);
 analyzer.init(false);
 
 // should share cte contexts
 analyzer.aliasToCTEs.putAll(aliasToCTEs);
 
-HiveOperation operation = SessionState.get().getHiveOperation();
+HiveOperation operation = queryState.getHiveOperation();
 try {
   analyzer.analyzeInternal(createTable);
 } finally {
-  SessionState.get().setCommandType(operation);
+  queryState.setCommandType(operation);
 }
 
 Table table = analyzer.tableDesc.toTable(conf);
@@ -6977,7 +6978,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 if (ltd != null && SessionState.get() != null) {
   SessionState.get().getLineageState()
   .mapDirToFop(ltd.getSourcePath(), (FileSinkOperator) output);
-    } else if ( SessionState.get().getCommandType().equals(HiveOperation.CREATETABLE_AS_SELECT.getOperationName())) {
+    } else if ( queryState.getCommandType().equals(HiveOperation.CREATETABLE_AS_SELECT.getOperationName())) {
 
   Path tlocation = null;
   String tName = Utilities.getDbTableName(tableDesc.getTableName())[1];
@@ -9340,7 +9341,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 limit.intValue(), extraMRStep);
 qb.getParseInfo().setOuterQueryLimit(limit.intValue());
   }
-  if (!SessionState.get().getHiveOperation().equals(HiveOperation.CREATEVIEW)) {
+  if (!queryState.getHiveOperation().equals(HiveOperation.CREATEVIEW)) {
 curr = genFileSinkPlan(dest, qb, curr);
   }
 }
@@ -10345,7 

hive git commit: HIVE-13591: TestSchemaTool is failing on master (Hari Subramaniyan, reviewed by Ashutosh Chauhan)

2016-04-25 Thread harisankar
Repository: hive
Updated Branches:
  refs/heads/master f13ee0897 -> 86bdcbcd3


HIVE-13591: TestSchemaTool is failing on master (Hari Subramaniyan, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/86bdcbcd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/86bdcbcd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/86bdcbcd

Branch: refs/heads/master
Commit: 86bdcbcd37bcca19d50aa8a79f171ec6ddca0157
Parents: f13ee08
Author: Hari Subramaniyan 
Authored: Mon Apr 25 12:14:14 2016 -0700
Committer: Hari Subramaniyan 
Committed: Mon Apr 25 12:14:14 2016 -0700

--
 metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/86bdcbcd/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
--
diff --git a/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
index 2ef7223..1d00499 100644
--- a/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
@@ -108,7 +108,7 @@ CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHA
 
 CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, 
"NEXT_EVENT_ID" BIGINT NOT NULL);
 
-CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_TBL_ID" 
BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_TBL_ID" BIGINT NOT NULL,  
"POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400 NOT NULL, 
"CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" 
SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL);
+CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_TBL_ID" 
BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_TBL_ID" BIGINT NOT NULL,  
"POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, 
"CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" 
SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL);
 
 ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY 
KEY ("CONSTRAINT_NAME", "POSITION");
 



[5/6] hive git commit: HIVE-13130: HS2 changes : API calls for retrieving primary keys and foreign keys information (Hari Subramaniyan, reviewed by Ashutosh Chauhan)

2016-04-25 Thread harisankar
http://git-wip-us.apache.org/repos/asf/hive/blob/f13ee089/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
--
diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
index 395af2c..66f5e8c 100644
--- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
+++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
@@ -7542,6 +7542,604 @@ void TGetFunctionsResp::printTo(std::ostream& out) const {
 }
 
 
+TGetPrimaryKeysReq::~TGetPrimaryKeysReq() throw() {
+}
+
+
+void TGetPrimaryKeysReq::__set_sessionHandle(const TSessionHandle& val) {
+  this->sessionHandle = val;
+}
+
+void TGetPrimaryKeysReq::__set_catalogName(const TIdentifier& val) {
+  this->catalogName = val;
+__isset.catalogName = true;
+}
+
+void TGetPrimaryKeysReq::__set_schemaName(const TIdentifier& val) {
+  this->schemaName = val;
+__isset.schemaName = true;
+}
+
+void TGetPrimaryKeysReq::__set_tableName(const TIdentifier& val) {
+  this->tableName = val;
+__isset.tableName = true;
+}
+
+uint32_t TGetPrimaryKeysReq::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+  bool isset_sessionHandle = false;
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->sessionHandle.read(iprot);
+  isset_sessionHandle = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  case 2:
+if (ftype == ::apache::thrift::protocol::T_STRING) {
+  xfer += iprot->readString(this->catalogName);
+  this->__isset.catalogName = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  case 3:
+if (ftype == ::apache::thrift::protocol::T_STRING) {
+  xfer += iprot->readString(this->schemaName);
+  this->__isset.schemaName = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  case 4:
+if (ftype == ::apache::thrift::protocol::T_STRING) {
+  xfer += iprot->readString(this->tableName);
+  this->__isset.tableName = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  if (!isset_sessionHandle)
+throw TProtocolException(TProtocolException::INVALID_DATA);
+  return xfer;
+}
+
+uint32_t TGetPrimaryKeysReq::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("TGetPrimaryKeysReq");
+
+  xfer += oprot->writeFieldBegin("sessionHandle", 
::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->sessionHandle.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  if (this->__isset.catalogName) {
+xfer += oprot->writeFieldBegin("catalogName", 
::apache::thrift::protocol::T_STRING, 2);
+xfer += oprot->writeString(this->catalogName);
+xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.schemaName) {
+xfer += oprot->writeFieldBegin("schemaName", 
::apache::thrift::protocol::T_STRING, 3);
+xfer += oprot->writeString(this->schemaName);
+xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.tableName) {
+xfer += oprot->writeFieldBegin("tableName", 
::apache::thrift::protocol::T_STRING, 4);
+xfer += oprot->writeString(this->tableName);
+xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(TGetPrimaryKeysReq &a, TGetPrimaryKeysReq &b) {
+  using ::std::swap;
+  swap(a.sessionHandle, b.sessionHandle);
+  swap(a.catalogName, b.catalogName);
+  swap(a.schemaName, b.schemaName);
+  swap(a.tableName, b.tableName);
+  swap(a.__isset, b.__isset);
+}
+
+TGetPrimaryKeysReq::TGetPrimaryKeysReq(const TGetPrimaryKeysReq& other260) {
+  sessionHandle = other260.sessionHandle;
+  catalogName = other260.catalogName;
+  schemaName = other260.schemaName;
+  tableName = other260.tableName;
+  __isset = other260.__isset;
+}
+TGetPrimaryKeysReq& TGetPrimaryKeysReq::operator=(const TGetPrimaryKeysReq& other261) {
+  sessionHandle = other261.sessionHandle;
+  catalogName = other261.catalogName;
+  schemaName = other261.schemaName;
+  tableName = other261.tableName;
+  __isset = other261.__isset;
+  

[3/6] hive git commit: HIVE-13130: HS2 changes : API calls for retrieving primary keys and foreign keys information (Hari Subramaniyan, reviewed by Ashutosh Chauhan)

2016-04-25 Thread harisankar
http://git-wip-us.apache.org/repos/asf/hive/blob/f13ee089/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetCrossReferenceReq.java
--
diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetCrossReferenceReq.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetCrossReferenceReq.java
new file mode 100644
index 000..9729570
--- /dev/null
+++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetCrossReferenceReq.java
@@ -0,0 +1,1034 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hive.service.rpc.thrift;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class TGetCrossReferenceReq implements org.apache.thrift.TBase<TGetCrossReferenceReq, TGetCrossReferenceReq._Fields>, java.io.Serializable, Cloneable, Comparable<TGetCrossReferenceReq> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetCrossReferenceReq");
+
+  private static final org.apache.thrift.protocol.TField 
SESSION_HANDLE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("sessionHandle", 
org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField 
PARENT_CATALOG_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("parentCatalogName", 
org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField 
PARENT_SCHEMA_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("parentSchemaName", 
org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField 
PARENT_TABLE_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("parentTableName", 
org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField 
FOREIGN_CATALOG_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("foreignCatalogName", 
org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField 
FOREIGN_SCHEMA_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("foreignSchemaName", 
org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField 
FOREIGN_TABLE_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("foreignTableName", 
org.apache.thrift.protocol.TType.STRING, (short)7);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TGetCrossReferenceReqStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TGetCrossReferenceReqTupleSchemeFactory());
+  }
+
+  private TSessionHandle sessionHandle; // required
+  private String parentCatalogName; // optional
+  private String parentSchemaName; // optional
+  private String parentTableName; // optional
+  private String foreignCatalogName; // optional
+  private String foreignSchemaName; // optional
+  private String foreignTableName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+SESSION_HANDLE((short)1, "sessionHandle"),
+PARENT_CATALOG_NAME((short)2, "parentCatalogName"),
+PARENT_SCHEMA_NAME((short)3, "parentSchemaName"),
+PARENT_TABLE_NAME((short)4, "parentTableName"),
+FOREIGN_CATALOG_NAME((short)5, "foreignCatalogName"),
+FOREIGN_SCHEMA_NAME((short)6, "foreignSchemaName"),
+FOREIGN_TABLE_NAME((short)7, "foreignTableName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+static {
+  for (_Fields field : EnumSet.allOf(_Fields.class)) {
+

[2/6] hive git commit: HIVE-13130: HS2 changes : API calls for retrieving primary keys and foreign keys information (Hari Subramaniyan, reviewed by Ashutosh Chauhan)

2016-04-25 Thread harisankar
http://git-wip-us.apache.org/repos/asf/hive/blob/f13ee089/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysResp.java
--
diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysResp.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysResp.java
new file mode 100644
index 000..72d9507
--- /dev/null
+++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysResp.java
@@ -0,0 +1,509 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hive.service.rpc.thrift;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class TGetPrimaryKeysResp implements org.apache.thrift.TBase<TGetPrimaryKeysResp, TGetPrimaryKeysResp._Fields>, java.io.Serializable, Cloneable, Comparable<TGetPrimaryKeysResp> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetPrimaryKeysResp");
+
+  private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new TGetPrimaryKeysRespStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new TGetPrimaryKeysRespTupleSchemeFactory());
+  }
+
+  private TStatus status; // required
+  private TOperationHandle operationHandle; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+STATUS((short)1, "status"),
+OPERATION_HANDLE((short)2, "operationHandle");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+static {
+  for (_Fields field : EnumSet.allOf(_Fields.class)) {
+byName.put(field.getFieldName(), field);
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, or null if its not found.
+ */
+public static _Fields findByThriftId(int fieldId) {
+  switch(fieldId) {
+case 1: // STATUS
+  return STATUS;
+case 2: // OPERATION_HANDLE
+  return OPERATION_HANDLE;
+default:
+  return null;
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+public static _Fields findByThriftIdOrThrow(int fieldId) {
+  _Fields fields = findByThriftId(fieldId);
+  if (fields == null) throw new IllegalArgumentException("Field " + 
fieldId + " doesn't exist!");
+  return fields;
+}
+
+/**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+public static _Fields findByName(String name) {
+  return byName.get(name);
+}
+
+private final short _thriftId;
+private final String _fieldName;
+
+_Fields(short thriftId, String fieldName) {
+  _thriftId = thriftId;
+  _fieldName = fieldName;
+}
+
+public short getThriftFieldId() {
+  return _thriftId;
+}
+
+public String getFieldName() {
+  return _fieldName;
+}
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 

[4/6] hive git commit: HIVE-13130: HS2 changes : API calls for retrieving primary keys and foreign keys information (Hari Subramaniyan, reviewed by Ashutosh Chauhan)

2016-04-25 Thread harisankar
http://git-wip-us.apache.org/repos/asf/hive/blob/f13ee089/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIService.java
--
diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIService.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIService.java
index c684f89..6dba051 100644
--- a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIService.java
+++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIService.java
@@ -61,6 +61,10 @@ public class TCLIService {
 
    public TGetFunctionsResp GetFunctions(TGetFunctionsReq req) throws org.apache.thrift.TException;

+    public TGetPrimaryKeysResp GetPrimaryKeys(TGetPrimaryKeysReq req) throws org.apache.thrift.TException;
+
+    public TGetCrossReferenceResp GetCrossReference(TGetCrossReferenceReq req) throws org.apache.thrift.TException;
+
    public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) throws org.apache.thrift.TException;

    public TCancelOperationResp CancelOperation(TCancelOperationReq req) throws org.apache.thrift.TException;
@@ -103,6 +107,10 @@ public class TCLIService {
 
    public void GetFunctions(TGetFunctionsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

+    public void GetPrimaryKeys(TGetPrimaryKeysReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void GetCrossReference(TGetCrossReferenceReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
    public void GetOperationStatus(TGetOperationStatusReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

    public void CancelOperation(TCancelOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -394,6 +402,52 @@ public class TCLIService {
      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetFunctions failed: unknown result");
 }
 
+    public TGetPrimaryKeysResp GetPrimaryKeys(TGetPrimaryKeysReq req) throws org.apache.thrift.TException
+    {
+      send_GetPrimaryKeys(req);
+      return recv_GetPrimaryKeys();
+    }
+
+    public void send_GetPrimaryKeys(TGetPrimaryKeysReq req) throws org.apache.thrift.TException
+    {
+      GetPrimaryKeys_args args = new GetPrimaryKeys_args();
+      args.setReq(req);
+      sendBase("GetPrimaryKeys", args);
+    }
+
+    public TGetPrimaryKeysResp recv_GetPrimaryKeys() throws org.apache.thrift.TException
+    {
+      GetPrimaryKeys_result result = new GetPrimaryKeys_result();
+      receiveBase(result, "GetPrimaryKeys");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetPrimaryKeys failed: unknown result");
+    }
+
+    public TGetCrossReferenceResp GetCrossReference(TGetCrossReferenceReq req) throws org.apache.thrift.TException
+    {
+      send_GetCrossReference(req);
+      return recv_GetCrossReference();
+    }
+
+    public void send_GetCrossReference(TGetCrossReferenceReq req) throws org.apache.thrift.TException
+    {
+      GetCrossReference_args args = new GetCrossReference_args();
+      args.setReq(req);
+      sendBase("GetCrossReference", args);
+    }
+
+    public TGetCrossReferenceResp recv_GetCrossReference() throws org.apache.thrift.TException
+    {
+      GetCrossReference_result result = new GetCrossReference_result();
+      receiveBase(result, "GetCrossReference");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetCrossReference failed: unknown result");
+    }
+
    public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) throws org.apache.thrift.TException
 {
   send_GetOperationStatus(req);
@@ -948,6 +1002,70 @@ public class TCLIService {
   }
 }
 
+    public void GetPrimaryKeys(TGetPrimaryKeysReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      GetPrimaryKeys_call method_call = new GetPrimaryKeys_call(req, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class GetPrimaryKeys_call extends org.apache.thrift.async.TAsyncMethodCall {
+  private TGetPrimaryKeysReq req;
+  public GetPrimaryKeys_call(TGetPrimaryKeysReq req, 

[6/6] hive git commit: HIVE-13130: HS2 changes : API calls for retrieving primary keys and foreign keys information (Hari Subramaniyan, reviewed by Ashutosh Chauhan)

2016-04-25 Thread harisankar
HIVE-13130: HS2 changes : API calls for retrieving primary keys and foreign keys information (Hari Subramaniyan, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f13ee089
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f13ee089
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f13ee089

Branch: refs/heads/master
Commit: f13ee089797c3b0a61fd6b053526b8da46ec4d36
Parents: 4536dcd
Author: Hari Subramaniyan 
Authored: Mon Apr 25 12:10:52 2016 -0700
Committer: Hari Subramaniyan 
Committed: Mon Apr 25 12:10:52 2016 -0700

--
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   |5 +-
 .../apache/hive/jdbc/HiveDatabaseMetaData.java  |   49 +-
 service-rpc/if/TCLIService.thrift   |   50 +
 .../src/gen/thrift/gen-cpp/TCLIService.cpp  |  812 -
 .../src/gen/thrift/gen-cpp/TCLIService.h|  252 +++
 .../gen-cpp/TCLIService_server.skeleton.cpp |   10 +
 .../gen/thrift/gen-cpp/TCLIService_types.cpp|  836 +++--
 .../src/gen/thrift/gen-cpp/TCLIService_types.h  |  276 +++
 .../hive/service/rpc/thrift/TCLIService.java| 1716 ++
 .../rpc/thrift/TGetCrossReferenceReq.java   | 1034 +++
 .../rpc/thrift/TGetCrossReferenceResp.java  |  509 ++
 .../service/rpc/thrift/TGetPrimaryKeysReq.java  |  716 
 .../service/rpc/thrift/TGetPrimaryKeysResp.java |  509 ++
 .../src/gen/thrift/gen-php/TCLIService.php  |  432 +
 service-rpc/src/gen/thrift/gen-php/Types.php|  583 ++
 .../gen-py/TCLIService/TCLIService-remote   |   14 +
 .../thrift/gen-py/TCLIService/TCLIService.py|  378 
 .../src/gen/thrift/gen-py/TCLIService/ttypes.py |  417 +
 .../src/gen/thrift/gen-rb/t_c_l_i_service.rb|  108 ++
 .../gen/thrift/gen-rb/t_c_l_i_service_types.rb  |   90 +
 .../org/apache/hive/service/cli/CLIService.java |   30 +
 .../service/cli/EmbeddedCLIServiceClient.java   |   15 +
 .../apache/hive/service/cli/ICLIService.java|8 +
 .../operation/GetCrossReferenceOperation.java   |  169 ++
 .../cli/operation/GetPrimaryKeysOperation.java  |  126 ++
 .../service/cli/operation/OperationManager.java |   19 +
 .../hive/service/cli/session/HiveSession.java   |   28 +
 .../service/cli/session/HiveSessionImpl.java|   47 +
 .../thrift/RetryingThriftCLIServiceClient.java  |   16 +
 .../service/cli/thrift/ThriftCLIService.java|   39 +
 .../cli/thrift/ThriftCLIServiceClient.java  |   47 +
 31 files changed, 9188 insertions(+), 152 deletions(-)
--
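
With these additions, the standard java.sql.DatabaseMetaData calls are backed by real metastore data instead of empty result sets (see the TestJdbcDriver2 change below). A minimal client-side sketch; the connection URL, the testdb schema, and the table names are illustrative:

// Standard JDBC metadata calls that the new HS2 operations serve.
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class PkFkMetadataSketch {
  public static void main(String[] args) throws Exception {
    try (Connection con = DriverManager.getConnection("jdbc:hive2://localhost:10000/default")) {
      DatabaseMetaData md = con.getMetaData();

      // Primary key columns of a table; the catalog argument is null for Hive.
      try (ResultSet pk = md.getPrimaryKeys(null, "testdb", "pk_tbl")) {
        while (pk.next()) {
          System.out.println(pk.getString("COLUMN_NAME") + " seq=" + pk.getShort("KEY_SEQ"));
        }
      }

      // Foreign keys between a parent (primary) table and a foreign table.
      try (ResultSet fk = md.getCrossReference(null, "testdb", "pk_tbl", null, "testdb", "fk_tbl")) {
        while (fk.next()) {
          System.out.println(fk.getString("FKCOLUMN_NAME") + " -> " + fk.getString("PKCOLUMN_NAME"));
        }
      }
    }
  }
}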


http://git-wip-us.apache.org/repos/asf/hive/blob/f13ee089/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 7028c25..965627f 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -369,7 +369,7 @@ public class TestJdbcDriver2 {
 assertNull(rs.getStatement());
 rs.close();
 
-rs = md.getPrimaryKeys(null, null, null);
+rs = md.getPrimaryKeys(null, "testdb", tableName);
 assertNull(rs.getStatement());
 rs.close();
 
@@ -2145,8 +2145,7 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
   public void testPrimaryKeys() throws SQLException {
 DatabaseMetaData dbmd = con.getMetaData();
 assertNotNull(dbmd);
-// currently getPrimaryKeys always returns an empty resultset for Hive
-ResultSet res = dbmd.getPrimaryKeys(null, null, null);
+ResultSet res = dbmd.getPrimaryKeys(null, "testdb", tableName);
 ResultSetMetaData md = res.getMetaData();
 assertEquals(md.getColumnCount(), 6);
 assertFalse(res.next());

http://git-wip-us.apache.org/repos/asf/hive/blob/f13ee089/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
--
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java b/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
index 7e54d1f..9d73470 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
@@ -35,11 +35,15 @@ import org.apache.hive.service.rpc.thrift.TGetCatalogsReq;
 import org.apache.hive.service.rpc.thrift.TGetCatalogsResp;
 import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
 import org.apache.hive.service.rpc.thrift.TGetColumnsResp;
+import org.apache.hive.service.rpc.thrift.TGetCrossReferenceReq;
+import 

[1/6] hive git commit: HIVE-13130: HS2 changes : API calls for retrieving primary keys and foreign keys information (Hari Subramaniyan, reviewed by Ashutosh Chauhan)

2016-04-25 Thread harisankar
Repository: hive
Updated Branches:
  refs/heads/master 4536dcd68 -> f13ee0897


http://git-wip-us.apache.org/repos/asf/hive/blob/f13ee089/service/src/java/org/apache/hive/service/cli/ICLIService.java
--
diff --git a/service/src/java/org/apache/hive/service/cli/ICLIService.java b/service/src/java/org/apache/hive/service/cli/ICLIService.java
index 0a54bdd..e4aef96 100644
--- a/service/src/java/org/apache/hive/service/cli/ICLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/ICLIService.java
@@ -98,5 +98,13 @@ public interface ICLIService {
  void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory,
   String tokenStr) throws HiveSQLException;
 
+  OperationHandle getPrimaryKeys(SessionHandle sessionHandle, String catalog,
+String schema, String table) throws HiveSQLException;
+
+  OperationHandle getCrossReference(SessionHandle sessionHandle,
+String primaryCatalog, String primarySchema, String primaryTable,
+String foreignCatalog, String foreignSchema, String foreignTable)
+throws HiveSQLException;
+
 
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/f13ee089/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java
--
diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java
new file mode 100644
index 000..a6439b6
--- /dev/null
+++ b/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hadoop.hive.serde2.thrift.Type;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetCrossReferenceOperation.
+ *
+ */
+public class GetCrossReferenceOperation extends MetadataOperation {
+  /**
+  PKTABLE_CAT String => parent key table catalog (may be null)
+  PKTABLE_SCHEM String => parent key table schema (may be null)
+  PKTABLE_NAME String => parent key table name
+  PKCOLUMN_NAME String => parent key column name
+  FKTABLE_CAT String => foreign key table catalog (may be null) being exported (may be null)
+  FKTABLE_SCHEM String => foreign key table schema (may be null) being exported (may be null)
+  FKTABLE_NAME String => foreign key table name being exported
+  FKCOLUMN_NAME String => foreign key column name being exported
+  KEY_SEQ short => sequence number within foreign key( a value of 1 represents the first column of the foreign key, a value of 2 would represent the second column within the foreign key).
+  UPDATE_RULE short => What happens to foreign key when parent key is updated:
+  importedNoAction - do not allow update of parent key if it has been imported
+  importedKeyCascade - change imported key to agree with parent key update
+  importedKeySetNull - change imported key to NULL if its parent key has been updated
+  importedKeySetDefault - change imported key to default values if its parent key has been updated
+  importedKeyRestrict - same as importedKeyNoAction (for ODBC 2.x compatibility)
+  DELETE_RULE short => What happens to the foreign key when parent key is deleted.
+  importedKeyNoAction - do not allow delete of parent key if it has been imported
+  importedKeyCascade - delete rows that import a deleted key
+  importedKeySetNull - change imported key to NULL if its 

hive git commit: HIVE-12637 : make retryable SQLExceptions in TxnHandler configurable (Wei Zheng, reviewed by Eugene Koifman)

2016-04-25 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 edf89a6a0 -> 648f19307


HIVE-12637 : make retryable SQLExceptions in TxnHandler configurable (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/648f1930
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/648f1930
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/648f1930

Branch: refs/heads/branch-1
Commit: 648f19307cab1b55e44b930ffaf043cc93cd4d46
Parents: edf89a6
Author: Wei Zheng 
Authored: Mon Apr 25 11:17:11 2016 -0700
Committer: Wei Zheng 
Committed: Mon Apr 25 11:19:35 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java |  8 
 .../hadoop/hive/metastore/txn/TxnHandler.java | 18 +++---
 .../hadoop/hive/metastore/txn/TestTxnHandler.java | 15 +++
 3 files changed, 38 insertions(+), 3 deletions(-)
--
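
The new hive.txn.retryable.sqlex.regex property holds a comma-separated list of regular expressions that are matched against a string built from the SQLException, as described in the HiveConf hunk below. A rough sketch of that matching, assuming find-style (substring) matching; the helper here is illustrative and is not the TxnHandler code itself:

// Illustrative helper, not TxnHandler.isRetryable; the candidate string format
// follows the config description added to HiveConf in this commit.
import java.sql.SQLException;
import java.util.regex.Pattern;

public class RetryableSqlExSketch {
  // e.g. hive.txn.retryable.sqlex.regex = "Can't serialize.*,40001$,^Deadlock,.*ORA-08176.*"
  static boolean matchesConfiguredPatterns(String regexCsv, SQLException ex) {
    // String the patterns are matched against, per the config description below.
    String candidate = ex.getMessage() + " (SQLState=" + ex.getSQLState()
        + ", ErrorCode=" + ex.getErrorCode() + ")";
    for (String regex : regexCsv.split(",")) {
      // find-style matching and case sensitivity are assumptions of this sketch.
      if (Pattern.compile(regex.trim()).matcher(candidate).find()) {
        return true;
      }
    }
    return false;
  }
}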


http://git-wip-us.apache.org/repos/asf/hive/blob/648f1930/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 0d31131..7c93e44 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -173,6 +173,7 @@ public class HiveConf extends Configuration {
   HiveConf.ConfVars.HIVE_TXN_TIMEOUT,
   HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE,
   HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH,
+  HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX,
   HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION,
   HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED,
   HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE,
@@ -1492,6 +1493,13 @@ public class HiveConf extends Configuration {
 "transactions that Hive has to track at any given time, which may 
negatively affect\n" +
 "read performance."),
 
+HIVE_TXN_RETRYABLE_SQLEX_REGEX("hive.txn.retryable.sqlex.regex", "", 
"Comma separated list\n" +
+"of regular expression patterns for SQL state, error code, and error 
message of\n" +
+"retryable SQLExceptions, that's suitable for the metastore DB.\n" +
+"For example: Can't serialize.*,40001$,^Deadlock,.*ORA-08176.*\n" +
+"The string that the regex will be matched against is of the following 
form, where ex is a SQLException:\n" +
+"ex.getMessage() + \" (SQLState=\" + ex.getSQLState() + \", 
ErrorCode=\" + ex.getErrorCode() + \")\""),
+
 HIVE_COMPACTOR_INITIATOR_ON("hive.compactor.initiator.on", false,
 "Whether to run the initiator and cleaner threads on this metastore 
instance or not.\n" +
 "Set this to true on one instance of the Thrift metastore service as 
part of turning\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/648f1930/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
--
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index ed4a3c2..a64e7c8 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -51,6 +51,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Pattern;
 
 /**
  * A handler to answer transaction related calls that come into the metastore
@@ -1559,7 +1560,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
 } else {
   LOG.error("Too many repeated deadlocks in " + caller + ", giving 
up.");
 }
-  } else if (isRetryable(e)) {
+  } else if (isRetryable(conf, e)) {
 //in MSSQL this means Communication Link Failure
 if (retryNum++ < retryLimit) {
   LOG.warn("Retryable error detected in " + caller + ".  Will wait " + 
retryInterval +
@@ -2658,7 +2659,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   /**
* Returns true if {@code ex} should be retried
*/
-  private static boolean isRetryable(Exception ex) {
+  static boolean isRetryable(HiveConf conf, Exception ex) {
 if(ex instanceof SQLException) {
   SQLException sqlException = (SQLException)ex;
   if("08S01".equalsIgnoreCase(sqlException.getSQLState())) {
@@ -2669,6 +2670,17 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
 

[01/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 37db169a8 -> 6908198df


http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/vectorized_ptf.q.out
--
diff --git a/ql/src/test/results/clientpositive/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/vectorized_ptf.q.out
index 3b17591..fc4351d 100644
--- a/ql/src/test/results/clientpositive/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_ptf.q.out
@@ -142,88 +142,6 @@ from noop(on part_orc
   order by p_name
   )
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_PTBLFUNCTION
- noop
- TOK_TABREF
-TOK_TABNAME
-   part_orc
- TOK_PARTITIONINGSPEC
-TOK_DISTRIBUTEBY
-   TOK_TABLE_OR_COL
-  p_mfgr
-TOK_ORDERBY
-   TOK_TABSORTCOLNAMEASC
-  TOK_NULLS_FIRST
- TOK_TABLE_OR_COL
-p_name
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   p_mfgr
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   p_name
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   p_size
- TOK_SELEXPR
-TOK_FUNCTION
-   rank
-   TOK_WINDOWSPEC
-  TOK_PARTITIONINGSPEC
- TOK_DISTRIBUTEBY
-TOK_TABLE_OR_COL
-   p_mfgr
- TOK_ORDERBY
-TOK_TABSORTCOLNAMEASC
-   TOK_NULLS_FIRST
-  TOK_TABLE_OR_COL
- p_name
-r
- TOK_SELEXPR
-TOK_FUNCTION
-   dense_rank
-   TOK_WINDOWSPEC
-  TOK_PARTITIONINGSPEC
- TOK_DISTRIBUTEBY
-TOK_TABLE_OR_COL
-   p_mfgr
- TOK_ORDERBY
-TOK_TABSORTCOLNAMEASC
-   TOK_NULLS_FIRST
-  TOK_TABLE_OR_COL
- p_name
-dr
- TOK_SELEXPR
-TOK_FUNCTION
-   sum
-   TOK_TABLE_OR_COL
-  p_retailprice
-   TOK_WINDOWSPEC
-  TOK_PARTITIONINGSPEC
- TOK_DISTRIBUTEBY
-TOK_TABLE_OR_COL
-   p_mfgr
- TOK_ORDERBY
-TOK_TABSORTCOLNAMEASC
-   TOK_NULLS_FIRST
-  TOK_TABLE_OR_COL
- p_name
-  TOK_WINDOWRANGE
- preceding
-unbounded
- current
-s1
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
@@ -512,94 +430,6 @@ from noop (on (select p1.* from part_orc p1 join part_orc 
p2 on p1.p_partkey = p
 distribute by j.p_mfgr
 sort by j.p_name)
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_PTBLFUNCTION
- noop
- TOK_SUBQUERY
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   part_orc
-p1
- TOK_TABREF
-TOK_TABNAME
-   part_orc
-p2
- =
-.
-   TOK_TABLE_OR_COL
-  p1
-   p_partkey
-.
-   TOK_TABLE_OR_COL
-  p2
-   p_partkey
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_ALLCOLREF
-   TOK_TABNAME
-  p1
-j
- TOK_PARTITIONINGSPEC
-TOK_DISTRIBUTEBY
-   .
-  TOK_TABLE_OR_COL
- j
-  p_mfgr
-TOK_SORTBY
-   TOK_TABSORTCOLNAMEASC
-  TOK_NULLS_FIRST
- .
-TOK_TABLE_OR_COL
-   j
-p_name
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   p_mfgr
- 

[11/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
--
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
index 9ee3462..a2a06b2 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
@@ -6,56 +6,6 @@ POSTHOOK: query: -- complex predicates in the where clause
 
 explain extended select a.* from srcpart a where rand(1) < 0.1 and a.ds = 
'2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2'
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-srcpart
- a
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_ALLCOLREF
-   TOK_TABNAME
-  a
-  TOK_WHERE
- and
-and
-   and
-  <
- TOK_FUNCTION
-rand
-1
- 0.1
-  =
- .
-TOK_TABLE_OR_COL
-   a
-ds
- '2008-04-08'
-   not
-  or
- >
-TOK_TABLE_OR_COL
-   key
-50
- <
-TOK_TABLE_OR_COL
-   key
-10
-like
-   .
-  TOK_TABLE_OR_COL
- a
-  hr
-   '%2'
-
-
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 
@@ -144,50 +94,6 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- without rand for comparison
 explain extended select a.* from srcpart a where a.ds = '2008-04-08' and 
not(key > 50 or key < 10) and a.hr like '%2'
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-srcpart
- a
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_ALLCOLREF
-   TOK_TABNAME
-  a
-  TOK_WHERE
- and
-and
-   =
-  .
- TOK_TABLE_OR_COL
-a
- ds
-  '2008-04-08'
-   not
-  or
- >
-TOK_TABLE_OR_COL
-   key
-50
- <
-TOK_TABLE_OR_COL
-   key
-10
-like
-   .
-  TOK_TABLE_OR_COL
- a
-  hr
-   '%2'
-
-
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/reduce_deduplicate.q.out
--
diff --git a/ql/src/test/results/clientpositive/reduce_deduplicate.q.out b/ql/src/test/results/clientpositive/reduce_deduplicate.q.out
index 96b51d7..379f884 100644
--- a/ql/src/test/results/clientpositive/reduce_deduplicate.q.out
+++ b/ql/src/test/results/clientpositive/reduce_deduplicate.q.out
@@ -14,26 +14,6 @@ POSTHOOK: query: explain extended
 insert overwrite table bucket5_1
 select * from src cluster by key
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-src
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   bucket5_1
-  TOK_SELECT
- TOK_SELEXPR
-TOK_ALLCOLREF
-  TOK_CLUSTERBY
- TOK_TABLE_OR_COL
-key
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -248,133 +228,6 @@ select s2.* from
 )s
 )s2
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_SUBQUERY
- TOK_QUERY
-TOK_FROM
-   TOK_SUBQUERY
-  TOK_QUERY
- TOK_FROM
-TOK_TABREF
-   TOK_TABNAME
-  complex_tbl_2
- TOK_INSERT
-TOK_DESTINATION
-   TOK_DIR
-  TOK_TMP_FILE
-TOK_SELECT
-   TOK_SELEXPR
-  TOK_TRANSFORM
- TOK_EXPLIST
- 

[12/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/pcr.q.out
--
diff --git a/ql/src/test/results/clientpositive/pcr.q.out b/ql/src/test/results/clientpositive/pcr.q.out
index 684d4d7..b53226e 100644
--- a/ql/src/test/results/clientpositive/pcr.q.out
+++ b/ql/src/test/results/clientpositive/pcr.q.out
@@ -52,48 +52,6 @@ PREHOOK: query: explain extended select key, value, ds from 
pcr_t1 where ds<='20
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select key, value, ds from pcr_t1 where 
ds<='2000-04-09' and key<5 order by key, ds
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-pcr_t1
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   value
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   ds
-  TOK_WHERE
- and
-<=
-   TOK_TABLE_OR_COL
-  ds
-   '2000-04-09'
-<
-   TOK_TABLE_OR_COL
-  key
-   5
-  TOK_ORDERBY
- TOK_TABSORTCOLNAMEASC
-TOK_NULLS_FIRST
-   TOK_TABLE_OR_COL
-  key
- TOK_TABSORTCOLNAMEASC
-TOK_NULLS_FIRST
-   TOK_TABLE_OR_COL
-  ds
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -279,41 +237,6 @@ PREHOOK: query: explain extended select key, value from 
pcr_t1 where ds<='2000-0
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select key, value from pcr_t1 where 
ds<='2000-04-09' or key<5 order by key
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-pcr_t1
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   value
-  TOK_WHERE
- or
-<=
-   TOK_TABLE_OR_COL
-  ds
-   '2000-04-09'
-<
-   TOK_TABLE_OR_COL
-  key
-   5
-  TOK_ORDERBY
- TOK_TABSORTCOLNAMEASC
-TOK_NULLS_FIRST
-   TOK_TABLE_OR_COL
-  key
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -582,53 +505,6 @@ PREHOOK: query: explain extended select key, value, ds 
from pcr_t1 where ds<='20
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select key, value, ds from pcr_t1 where 
ds<='2000-04-09' and key<5 and value != 'val_2' order by key, ds
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-pcr_t1
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   value
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   ds
-  TOK_WHERE
- and
-and
-   <=
-  TOK_TABLE_OR_COL
- ds
-  '2000-04-09'
-   <
-  TOK_TABLE_OR_COL
- key
-  5
-!=
-   TOK_TABLE_OR_COL
-  value
-   'val_2'
-  TOK_ORDERBY
- TOK_TABSORTCOLNAMEASC
-TOK_NULLS_FIRST
-   TOK_TABLE_OR_COL
-  key
- TOK_TABSORTCOLNAMEASC
-TOK_NULLS_FIRST
-   TOK_TABLE_OR_COL
-  ds
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -816,58 +692,6 @@ POSTHOOK: query: explain extended
 select key, value, ds from pcr_t1
 where (ds < '2000-04-09' and key < 5) or (ds > '2000-04-09' and value == 
'val_5') order by key, ds
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-pcr_t1
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   value
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   ds
-  TOK_WHERE
- or
-and
-   <
-  TOK_TABLE_OR_COL
- ds
-  '2000-04-09'
-   <
-  

[04/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/tez/tez_join_result_complex.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/tez/tez_join_result_complex.q.out 
b/ql/src/test/results/clientpositive/tez/tez_join_result_complex.q.out
index 180bcc6..3e3b08c 100644
--- a/ql/src/test/results/clientpositive/tez/tez_join_result_complex.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_join_result_complex.q.out
@@ -148,271 +148,6 @@ inner join
 service_request_clean b
 on a.contact_event_id = b.cnctevn_id
 POSTHOOK: type: CREATETABLE_AS_SELECT
-ABSTRACT SYNTAX TREE:
-  
-TOK_CREATETABLE
-   TOK_TABNAME
-  ct_events1_test
-   TOK_LIKETABLE
-   TOK_QUERY
-  TOK_FROM
- TOK_JOIN
-TOK_SUBQUERY
-   TOK_QUERY
-  TOK_FROM
- TOK_TABREF
-TOK_TABNAME
-   default
-   ct_events_clean
-a
-  TOK_INSERT
- TOK_DESTINATION
-TOK_DIR
-   TOK_TMP_FILE
- TOK_SELECT
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  contact_event_id
-   contact_event_id
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  ce_create_dt
-   ce_create_dt
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  ce_end_dt
-   ce_end_dt
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  contact_type
-   contact_type
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  cnctevs_cd
-   cnctevs_cd
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  contact_mode
-   contact_mode
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  cntvnst_stts_cd
-   cntvnst_stts_cd
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  total_transfers
-   total_transfers
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  ce_notes
-   ce_notes
- TOK_WHERE
-TOK_FUNCTION
-   TOK_ISNOTNULL
-   .
-  TOK_TABLE_OR_COL
- a
-  contact_event_id
-   $hdt$_0
-TOK_SUBQUERY
-   TOK_QUERY
-  TOK_FROM
- TOK_TABREF
-TOK_TABNAME
-   default
-   service_request_clean
-b
-  TOK_INSERT
- TOK_DESTINATION
-TOK_DIR
-   TOK_TMP_FILE
- TOK_SELECT
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- b
-  cnctevn_id
-   cnctevn_id
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- b
-  svcrqst_id
-   svcrqst_id
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- b
-  cnctmd_cd
-   cnctmd_cd
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- b
-  svcrtyp_cd

[08/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/spark/join35.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/join35.q.out 
b/ql/src/test/results/clientpositive/spark/join35.q.out
index 60ceca0..7f9aa24 100644
--- a/ql/src/test/results/clientpositive/spark/join35.q.out
+++ b/ql/src/test/results/clientpositive/spark/join35.q.out
@@ -34,118 +34,6 @@ FROM
 ) subq1
 JOIN src1 x ON (x.key = subq1.key)
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_SUBQUERY
-TOK_UNIONALL
-   TOK_QUERY
-  TOK_FROM
- TOK_TABREF
-TOK_TABNAME
-   src
-x
-  TOK_INSERT
- TOK_DESTINATION
-TOK_DIR
-   TOK_TMP_FILE
- TOK_SELECT
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- x
-  key
-   key
-TOK_SELEXPR
-   TOK_FUNCTION
-  count
-  1
-   cnt
- TOK_WHERE
-<
-   .
-  TOK_TABLE_OR_COL
- x
-  key
-   20
- TOK_GROUPBY
-.
-   TOK_TABLE_OR_COL
-  x
-   key
-   TOK_QUERY
-  TOK_FROM
- TOK_TABREF
-TOK_TABNAME
-   src
-x1
-  TOK_INSERT
- TOK_DESTINATION
-TOK_DIR
-   TOK_TMP_FILE
- TOK_SELECT
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- x1
-  key
-   key
-TOK_SELEXPR
-   TOK_FUNCTION
-  count
-  1
-   cnt
- TOK_WHERE
->
-   .
-  TOK_TABLE_OR_COL
- x1
-  key
-   100
- TOK_GROUPBY
-.
-   TOK_TABLE_OR_COL
-  x1
-   key
-subq1
- TOK_TABREF
-TOK_TABNAME
-   src1
-x
- =
-.
-   TOK_TABLE_OR_COL
-  x
-   key
-.
-   TOK_TABLE_OR_COL
-  subq1
-   key
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   dest_j1
-  TOK_SELECT
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  x
-   key
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  x
-   value
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  subq1
-   cnt
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/spark/join9.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/join9.q.out 
b/ql/src/test/results/clientpositive/spark/join9.q.out
index 5f26aaf..714302e 100644
--- a/ql/src/test/results/clientpositive/spark/join9.q.out
+++ b/ql/src/test/results/clientpositive/spark/join9.q.out
@@ -18,60 +18,6 @@ POSTHOOK: query: EXPLAIN EXTENDED
 FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = 
'2008-04-08' and src1.hr = '12'
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   srcpart
-src1
- TOK_TABREF
-TOK_TABNAME
-   src
-src2
- =
-.
-   TOK_TABLE_OR_COL
-  src1
-   key
-.
-   TOK_TABLE_OR_COL
-  src2
-   key
-   TOK_INSERT
-

[16/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
--
diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out 
b/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
index c95681a..3078854 100644
--- a/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
+++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
@@ -148,52 +148,6 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- should be allowed
 explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on 
L.key=R.key AND L.value=R.value
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   test1
-L
- TOK_TABREF
-TOK_TABNAME
-   test1
-R
- AND
-=
-   .
-  TOK_TABLE_OR_COL
- L
-  key
-   .
-  TOK_TABLE_OR_COL
- R
-  key
-=
-   .
-  TOK_TABLE_OR_COL
- L
-  value
-   .
-  TOK_TABLE_OR_COL
- R
-  value
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_HINTLIST
-TOK_HINT
-   TOK_MAPJOIN
-   TOK_HINTARGLIST
-  R
- TOK_SELEXPR
-TOK_ALLCOLREF
-
-
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
   Stage-1 depends on stages: Stage-3
@@ -336,52 +290,6 @@ PREHOOK: query: explain extended select /* + MAPJOIN(R) */ 
* from test2 L join t
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test2 L 
join test2 R on L.key=R.key AND L.value=R.value
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   test2
-L
- TOK_TABREF
-TOK_TABNAME
-   test2
-R
- AND
-=
-   .
-  TOK_TABLE_OR_COL
- L
-  key
-   .
-  TOK_TABLE_OR_COL
- R
-  key
-=
-   .
-  TOK_TABLE_OR_COL
- L
-  value
-   .
-  TOK_TABLE_OR_COL
- R
-  value
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_HINTLIST
-TOK_HINT
-   TOK_MAPJOIN
-   TOK_HINTARGLIST
-  R
- TOK_SELEXPR
-TOK_ALLCOLREF
-
-
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
   Stage-1 depends on stages: Stage-3
@@ -526,47 +434,6 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- should not apply bucket mapjoin
 explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on 
L.key+L.key=R.key
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   test1
-L
- TOK_TABREF
-TOK_TABNAME
-   test1
-R
- =
-+
-   .
-  TOK_TABLE_OR_COL
- L
-  key
-   .
-  TOK_TABLE_OR_COL
- L
-  key
-.
-   TOK_TABLE_OR_COL
-  R
-   key
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_HINTLIST
-TOK_HINT
-   TOK_MAPJOIN
-   TOK_HINTARGLIST
-  R
- TOK_SELEXPR
-TOK_ALLCOLREF
-
-
 STAGE DEPENDENCIES:
   Stage-3 is a root stage
   Stage-1 depends on stages: Stage-3
@@ -701,52 +568,6 @@ PREHOOK: query: explain extended select /* + MAPJOIN(R) */ 
* from test1 L join t
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L 
join test2 R on L.key=R.key AND L.value=R.value
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   test1
-L
- TOK_TABREF
-TOK_TABNAME
-   test2
-R
- AND
-=
-   .
-  TOK_TABLE_OR_COL
- L
-  key
-   .
-  TOK_TABLE_OR_COL
- R
-  key
-=
-   .
-   

[09/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/spark/ctas.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/ctas.q.out 
b/ql/src/test/results/clientpositive/spark/ctas.q.out
index 086ad73..5396ada 100644
--- a/ql/src/test/results/clientpositive/spark/ctas.q.out
+++ b/ql/src/test/results/clientpositive/spark/ctas.q.out
@@ -678,49 +678,6 @@ PREHOOK: query: explain extended create table nzhang_ctas5 
row format delimited
 PREHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: query: explain extended create table nzhang_ctas5 row format 
delimited fields terminated by ',' lines terminated by '\012' stored as 
textfile as select key, value from src sort by key, value limit 10
 POSTHOOK: type: CREATETABLE_AS_SELECT
-ABSTRACT SYNTAX TREE:
-  
-TOK_CREATETABLE
-   TOK_TABNAME
-  nzhang_ctas5
-   TOK_LIKETABLE
-   TOK_TABLEROWFORMAT
-  TOK_SERDEPROPS
- TOK_TABLEROWFORMATFIELD
-','
- TOK_TABLEROWFORMATLINES
-'\012'
-   TOK_FILEFORMAT_GENERIC
-  textfile
-   TOK_QUERY
-  TOK_FROM
- TOK_TABREF
-TOK_TABNAME
-   src
-  TOK_INSERT
- TOK_DESTINATION
-TOK_DIR
-   TOK_TMP_FILE
- TOK_SELECT
-TOK_SELEXPR
-   TOK_TABLE_OR_COL
-  key
-TOK_SELEXPR
-   TOK_TABLE_OR_COL
-  value
- TOK_SORTBY
-TOK_TABSORTCOLNAMEASC
-   TOK_NULLS_FIRST
-  TOK_TABLE_OR_COL
- key
-TOK_TABSORTCOLNAMEASC
-   TOK_NULLS_FIRST
-  TOK_TABLE_OR_COL
- value
- TOK_LIMIT
-10
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out 
b/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
index fc2ff7b..e21b1b1 100644
--- a/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
+++ b/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
@@ -14,23 +14,6 @@ POSTHOOK: query: explain extended
 insert overwrite table bucket2_1
 select * from src
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-src
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   bucket2_1
-  TOK_SELECT
- TOK_SELEXPR
-TOK_ALLCOLREF
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out 
b/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out
index c5f16ed..0bfdf5b 100644
--- a/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out
+++ b/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out
@@ -32,108 +32,6 @@ SELECT f.key, g.value
 FROM filter_join_breaktask f JOIN filter_join_breaktask m ON( f.key = m.key 
AND f.ds='2008-04-08' AND m.ds='2008-04-08' AND f.key is not null) 
 JOIN filter_join_breaktask g ON(g.value = m.value AND g.ds='2008-04-08' AND 
m.ds='2008-04-08' AND m.value is not null AND m.value !='')
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_JOIN
-TOK_TABREF
-   TOK_TABNAME
-  filter_join_breaktask
-   f
-TOK_TABREF
-   TOK_TABNAME
-  filter_join_breaktask
-   m
-AND
-   AND
-  AND
- =
-.
-   TOK_TABLE_OR_COL
-  f
-   key
-.
-   TOK_TABLE_OR_COL
-  m
-   key
- =
-.
-   TOK_TABLE_OR_COL
-  f
-   ds
-'2008-04-08'
-  =
- .
-TOK_TABLE_OR_COL
-   m
-ds
- '2008-04-08'
-   TOK_FUNCTION
-  TOK_ISNOTNULL
-  .
-   

[17/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
--
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out 
b/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
index 3b1912a..19937cb 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark1.q.out
@@ -116,58 +116,6 @@ select a.key, a.value, b.value
 from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   srcbucket_mapjoin_part
-a
- TOK_TABREF
-TOK_TABNAME
-   srcbucket_mapjoin_part_2
-b
- and
-=
-   .
-  TOK_TABLE_OR_COL
- a
-  key
-   .
-  TOK_TABLE_OR_COL
- b
-  key
-=
-   .
-  TOK_TABLE_OR_COL
- b
-  ds
-   "2008-04-08"
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   bucketmapjoin_tmp_result
-  TOK_SELECT
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  a
-   key
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  a
-   value
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  b
-   value
-
-
 STAGE DEPENDENCIES:
   Stage-5 is a root stage
   Stage-4 depends on stages: Stage-5
@@ -478,58 +426,6 @@ select a.key, a.value, b.value
 from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   srcbucket_mapjoin_part
-a
- TOK_TABREF
-TOK_TABNAME
-   srcbucket_mapjoin_part_2
-b
- and
-=
-   .
-  TOK_TABLE_OR_COL
- a
-  key
-   .
-  TOK_TABLE_OR_COL
- b
-  key
-=
-   .
-  TOK_TABLE_OR_COL
- b
-  ds
-   "2008-04-08"
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   bucketmapjoin_tmp_result
-  TOK_SELECT
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  a
-   key
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  a
-   value
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  b
-   value
-
-
 STAGE DEPENDENCIES:
   Stage-5 is a root stage
   Stage-4 depends on stages: Stage-5

http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
--
diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out 
b/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
index 9a6aef7..90528be 100644
--- a/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
+++ b/ql/src/test/results/clientpositive/bucket_map_join_spark2.q.out
@@ -100,58 +100,6 @@ select a.key, a.value, b.value
 from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
 on a.key=b.key and b.ds="2008-04-08"
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   srcbucket_mapjoin_part
-a
- TOK_TABREF
-TOK_TABNAME
-   srcbucket_mapjoin_part_2
-b
- and
-=
-   .
-  TOK_TABLE_OR_COL
- a
-  key
-   .
-  TOK_TABLE_OR_COL
- b
-  key
-=
-   .
-  TOK_TABLE_OR_COL
- b
-  ds
-   "2008-04-08"
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   bucketmapjoin_tmp_result
-  TOK_SELECT
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  a
-   key
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  a
-   value
- TOK_SELEXPR
-   

[05/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/stats12.q.out
--
diff --git a/ql/src/test/results/clientpositive/stats12.q.out 
b/ql/src/test/results/clientpositive/stats12.q.out
index 6a44d15..9f37359 100644
--- a/ql/src/test/results/clientpositive/stats12.q.out
+++ b/ql/src/test/results/clientpositive/stats12.q.out
@@ -39,20 +39,6 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_ANALYZE
-   TOK_TAB
-  TOK_TABNAME
- analyze_srcpart
-  TOK_PARTSPEC
- TOK_PARTVAL
-ds
-'2008-04-08'
- TOK_PARTVAL
-hr
-
-
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-1 depends on stages: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/stats13.q.out
--
diff --git a/ql/src/test/results/clientpositive/stats13.q.out 
b/ql/src/test/results/clientpositive/stats13.q.out
index f98753e..6fcd353 100644
--- a/ql/src/test/results/clientpositive/stats13.q.out
+++ b/ql/src/test/results/clientpositive/stats13.q.out
@@ -39,21 +39,6 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute 
statistics
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_ANALYZE
-   TOK_TAB
-  TOK_TABNAME
- analyze_srcpart
-  TOK_PARTSPEC
- TOK_PARTVAL
-ds
-'2008-04-08'
- TOK_PARTVAL
-hr
-11
-
-
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-1 depends on stages: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/stats3.q.out
--
diff --git a/ql/src/test/results/clientpositive/stats3.q.out 
b/ql/src/test/results/clientpositive/stats3.q.out
index 0d8cbbd..4dbda94 100644
--- a/ql/src/test/results/clientpositive/stats3.q.out
+++ b/ql/src/test/results/clientpositive/stats3.q.out
@@ -20,17 +20,6 @@ PREHOOK: type: LOAD
 POSTHOOK: query: explain extended
 load data local inpath '../../data/files/test.dat' overwrite into table 
hive_test_src
 POSTHOOK: type: LOAD
-ABSTRACT SYNTAX TREE:
-  
-TOK_LOAD
-   '../../data/files/test.dat'
-   TOK_TAB
-  TOK_TABNAME
- hive_test_src
-   local
-   overwrite
-
-
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-1 depends on stages: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out 
b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
index 532e65a..e229dba 100644
--- 
a/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
+++ 
b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out
@@ -113,19 +113,6 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
 analyze table UserVisits_web_text_none compute statistics for columns 
sourceIP, avgTimeOnSite, adRevenue
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_ANALYZE
-   TOK_TAB
-  TOK_TABNAME
- UserVisits_web_text_none
-   columns
-   TOK_TABCOLNAME
-  sourceIP
-  avgTimeOnSite
-  adRevenue
-
-
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-1 depends on stages: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_1.q.out 
b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_1.q.out
index bd6db7f..892539d 100644
--- a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_1.q.out
@@ -109,38 +109,6 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- Since size is being used to find the big table, the order 
of the tables in the join does not matter
 explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON 
a.key = b.key
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   bucket_small
-a
- TOK_TABREF
-TOK_TABNAME
-   bucket_big
-b
- =
-.
-   TOK_TABLE_OR_COL
-  a
-   key
-.
-   TOK_TABLE_OR_COL
-  b
-   

[07/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out 
b/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
index 9f94f16..6b1cadf 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
@@ -24,101 +24,6 @@ group by key
 ) b
 on a.key=b.key and b.k1 < 5
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   src
-a
- TOK_SUBQUERY
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-src
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_FUNCTION
-   min
-   TOK_TABLE_OR_COL
-  key
-k
- TOK_SELEXPR
-+
-   TOK_FUNCTION
-  min
-  TOK_TABLE_OR_COL
- key
-   1
-k1
- TOK_SELEXPR
-+
-   TOK_FUNCTION
-  min
-  TOK_TABLE_OR_COL
- key
-   2
-k2
- TOK_SELEXPR
-+
-   TOK_FUNCTION
-  min
-  TOK_TABLE_OR_COL
- key
-   3
-k3
-  TOK_GROUPBY
- TOK_TABLE_OR_COL
-key
-b
- and
-=
-   .
-  TOK_TABLE_OR_COL
- a
-  key
-   .
-  TOK_TABLE_OR_COL
- b
-  key
-<
-   .
-  TOK_TABLE_OR_COL
- b
-  k1
-   5
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  a
-   key
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  b
-   k2
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  b
-   k3
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -406,101 +311,6 @@ group by key
 ) b
 on a.key=b.key and b.k1 < 5
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   src
-a
- TOK_SUBQUERY
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-src
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_FUNCTION
-   min
-   TOK_TABLE_OR_COL
-  key
-k
- TOK_SELEXPR
-+
-   TOK_FUNCTION
-  min
-  TOK_TABLE_OR_COL
- key
-   1
-k1
- TOK_SELEXPR
-+
-   TOK_FUNCTION
-  min
-  TOK_TABLE_OR_COL
- key
-   2
-k2
- TOK_SELEXPR
-+
-   TOK_FUNCTION
-  min
-  TOK_TABLE_OR_COL
- key
-   3
-k3
-  TOK_GROUPBY
-

[06/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/spark/union24.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/union24.q.out 
b/ql/src/test/results/clientpositive/spark/union24.q.out
index 3bdc503..5032630 100644
--- a/ql/src/test/results/clientpositive/spark/union24.q.out
+++ b/ql/src/test/results/clientpositive/spark/union24.q.out
@@ -72,121 +72,6 @@ select s.key, s.count from (
   select key, count(1) as count from src5 where key < 10 group by key
 )s
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_SUBQUERY
- TOK_UNIONALL
-TOK_UNIONALL
-   TOK_UNIONALL
-  TOK_QUERY
- TOK_FROM
-TOK_TABREF
-   TOK_TABNAME
-  src2
- TOK_INSERT
-TOK_DESTINATION
-   TOK_DIR
-  TOK_TMP_FILE
-TOK_SELECT
-   TOK_SELEXPR
-  TOK_TABLE_OR_COL
- key
-   TOK_SELEXPR
-  TOK_TABLE_OR_COL
- count
-TOK_WHERE
-   <
-  TOK_TABLE_OR_COL
- key
-  10
-  TOK_QUERY
- TOK_FROM
-TOK_TABREF
-   TOK_TABNAME
-  src3
- TOK_INSERT
-TOK_DESTINATION
-   TOK_DIR
-  TOK_TMP_FILE
-TOK_SELECT
-   TOK_SELEXPR
-  TOK_TABLE_OR_COL
- key
-   TOK_SELEXPR
-  TOK_TABLE_OR_COL
- count
-TOK_WHERE
-   <
-  TOK_TABLE_OR_COL
- key
-  10
-   TOK_QUERY
-  TOK_FROM
- TOK_TABREF
-TOK_TABNAME
-   src4
-  TOK_INSERT
- TOK_DESTINATION
-TOK_DIR
-   TOK_TMP_FILE
- TOK_SELECT
-TOK_SELEXPR
-   TOK_TABLE_OR_COL
-  key
-TOK_SELEXPR
-   TOK_TABLE_OR_COL
-  count
- TOK_WHERE
-<
-   TOK_TABLE_OR_COL
-  key
-   10
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-src5
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_FUNCTION
-   count
-   1
-count
-  TOK_WHERE
- <
-TOK_TABLE_OR_COL
-   key
-10
-  TOK_GROUPBY
- TOK_TABLE_OR_COL
-key
- s
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  s
-   key
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  s
-   count
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -641,117 +526,6 @@ select s.key, s.count from (
   select a.key as key, b.count as count from src4 a join src5 b on a.key=b.key 
where a.key < 10
 )s
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_SUBQUERY
- TOK_UNIONALL
-TOK_UNIONALL
-   TOK_QUERY
-  TOK_FROM
- TOK_TABREF
-TOK_TABNAME
-   src2
-  TOK_INSERT
- TOK_DESTINATION
-TOK_DIR
-   TOK_TMP_FILE
- TOK_SELECT
-   

[18/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh 
Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6908198d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6908198d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6908198d

Branch: refs/heads/master
Commit: 6908198df93b07e0e5fcadc7c901097028a07e94
Parents: 37db169
Author: Jesus Camacho Rodriguez 
Authored: Mon Apr 18 10:46:19 2016 +0100
Committer: Jesus Camacho Rodriguez 
Committed: Mon Apr 25 16:03:20 2016 +0100

--
 .../java/org/apache/hadoop/hive/ql/Driver.java  |2 +-
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |   33 +-
 .../apache/hadoop/hive/ql/hooks/ATSHook.java|6 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java|4 +-
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |3 +-
 .../apache/hadoop/hive/ql/plan/ExplainWork.java |   16 -
 .../parse/TestUpdateDeleteSemanticAnalyzer.java |   44 +-
 .../alter_partition_coltype.q.out   |  168 --
 .../results/clientpositive/ambiguitycheck.q.out |   30 -
 .../analyze_table_null_partition.q.out  |   16 -
 .../auto_join_reordering_values.q.out   |   83 -
 .../clientpositive/auto_sortmerge_join_1.q.out  |   96 -
 .../clientpositive/auto_sortmerge_join_11.q.out |  153 --
 .../clientpositive/auto_sortmerge_join_12.q.out |   60 -
 .../clientpositive/auto_sortmerge_join_2.q.out  |   64 -
 .../clientpositive/auto_sortmerge_join_3.q.out  |   96 -
 .../clientpositive/auto_sortmerge_join_4.q.out  |   96 -
 .../clientpositive/auto_sortmerge_join_5.q.out  |   96 -
 .../clientpositive/auto_sortmerge_join_7.q.out  |   96 -
 .../clientpositive/auto_sortmerge_join_8.q.out  |   96 -
 .../clientpositive/binary_output_format.q.out   |   36 -
 .../test/results/clientpositive/bucket1.q.out   |   17 -
 .../test/results/clientpositive/bucket2.q.out   |   17 -
 .../test/results/clientpositive/bucket3.q.out   |   21 -
 .../test/results/clientpositive/bucket4.q.out   |   17 -
 .../test/results/clientpositive/bucket5.q.out   |   36 -
 .../results/clientpositive/bucket_many.q.out|   17 -
 .../clientpositive/bucket_map_join_1.q.out  |   47 -
 .../clientpositive/bucket_map_join_2.q.out  |   47 -
 .../clientpositive/bucket_map_join_spark1.q.out |  104 -
 .../clientpositive/bucket_map_join_spark2.q.out |  104 -
 .../clientpositive/bucket_map_join_spark3.q.out |  104 -
 .../clientpositive/bucket_map_join_spark4.q.out |  134 --
 .../clientpositive/bucketcontext_1.q.out|   74 -
 .../clientpositive/bucketcontext_2.q.out|   74 -
 .../clientpositive/bucketcontext_3.q.out|   74 -
 .../clientpositive/bucketcontext_4.q.out|   74 -
 .../clientpositive/bucketcontext_5.q.out|   74 -
 .../clientpositive/bucketcontext_6.q.out|   74 -
 .../clientpositive/bucketcontext_7.q.out|   74 -
 .../clientpositive/bucketcontext_8.q.out|   74 -
 .../results/clientpositive/bucketmapjoin1.q.out |  226 --
 .../clientpositive/bucketmapjoin10.q.out|   51 -
 .../clientpositive/bucketmapjoin11.q.out|  112 -
 .../clientpositive/bucketmapjoin12.q.out|  102 -
 .../clientpositive/bucketmapjoin13.q.out|  155 --
 .../results/clientpositive/bucketmapjoin2.q.out |  164 --
 .../results/clientpositive/bucketmapjoin3.q.out |  128 --
 .../results/clientpositive/bucketmapjoin4.q.out |  100 -
 .../results/clientpositive/bucketmapjoin5.q.out |  100 -
 .../results/clientpositive/bucketmapjoin7.q.out |   73 -
 .../results/clientpositive/bucketmapjoin8.q.out |  102 -
 .../results/clientpositive/bucketmapjoin9.q.out |  102 -
 .../clientpositive/bucketmapjoin_negative.q.out |   57 -
 .../bucketmapjoin_negative2.q.out   |   50 -
 .../bucketmapjoin_negative3.q.out   |  409 
 .../cbo_rp_outer_join_ppr.q.java1.7.out |  168 --
 .../clientpositive/columnstats_partlvl.q.out|   30 -
 .../clientpositive/columnstats_tbllvl.q.out |   27 -
 .../test/results/clientpositive/combine2.q.out  |   23 -
 .../constantPropagateForSubQuery.q.out  |   62 -
 ql/src/test/results/clientpositive/ctas.q.out   |   43 -
 .../disable_merge_for_bucketing.q.out   |   17 -
 .../display_colstats_tbllvl.q.out   |   13 -
 .../clientpositive/drop_multi_partitions.q.out  |   14 -
 .../dynamic_partition_skip_default.q.out|   91 -
 .../encryption_join_unencrypted_tbl.q.out   |   32 -
 ...on_join_with_different_encryption_keys.q.out |   32 -
 .../clientpositive/explain_logical.q.out|  316 ---
 .../extrapolate_part_stats_full.q.out   |   74 -
 .../extrapolate_part_stats_partial.q.out|  111 -
 .../extrapolate_part_stats_partial_ndv.q.out|   78 -
 .../clientpositive/filter_join_breaktask.q.out  |  102 -
 .../clientpositive/fouter_join_ppr.q.out 

[10/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out 
b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
index dd01e69..00c601f 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out
@@ -122,38 +122,6 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- Since size is being used to find the big table, the order 
of the tables in the join does not matter
 explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON 
a.key = b.key
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   bucket_small
-a
- TOK_TABREF
-TOK_TABNAME
-   bucket_big
-b
- =
-.
-   TOK_TABLE_OR_COL
-  a
-   key
-.
-   TOK_TABLE_OR_COL
-  b
-   key
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_FUNCTIONSTAR
-   count
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -356,38 +324,6 @@ PREHOOK: query: explain extended select count(*) FROM 
bucket_big a JOIN bucket_s
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN 
bucket_small b ON a.key = b.key
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   bucket_big
-a
- TOK_TABREF
-TOK_TABNAME
-   bucket_small
-b
- =
-.
-   TOK_TABLE_OR_COL
-  a
-   key
-.
-   TOK_TABLE_OR_COL
-  b
-   key
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_FUNCTIONSTAR
-   count
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -590,38 +526,6 @@ PREHOOK: query: explain extended select count(*) FROM 
bucket_big a JOIN bucket_s
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN 
bucket_small b ON a.key = b.key
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   bucket_big
-a
- TOK_TABREF
-TOK_TABNAME
-   bucket_small
-b
- =
-.
-   TOK_TABLE_OR_COL
-  a
-   key
-.
-   TOK_TABLE_OR_COL
-  b
-   key
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_FUNCTIONSTAR
-   count
-
-
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-1 depends on stages: Stage-2

http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out 
b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out
index f0f27f0..5564ceb 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out
@@ -122,38 +122,6 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- Since size is being used to find the big table, the order 
of the tables in the join does not matter
 explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON 
a.key = b.key
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   bucket_small
-a
- TOK_TABREF
-TOK_TABNAME
-   bucket_big
-b
- =
-.
-   TOK_TABLE_OR_COL
-  a
-   key
-.
-   TOK_TABLE_OR_COL
-  b
-   key
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_FUNCTIONSTAR
-   count
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -356,38 +324,6 @@ PREHOOK: query: explain extended select count(*) 

[14/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/join9.q.out
--
diff --git a/ql/src/test/results/clientpositive/join9.q.out 
b/ql/src/test/results/clientpositive/join9.q.out
index e904b31..f41d153 100644
--- a/ql/src/test/results/clientpositive/join9.q.out
+++ b/ql/src/test/results/clientpositive/join9.q.out
@@ -18,60 +18,6 @@ POSTHOOK: query: EXPLAIN EXTENDED
 FROM srcpart src1 JOIN src src2 ON (src1.key = src2.key)
 INSERT OVERWRITE TABLE dest1 SELECT src1.key, src2.value where src1.ds = 
'2008-04-08' and src1.hr = '12'
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   srcpart
-src1
- TOK_TABREF
-TOK_TABNAME
-   src
-src2
- =
-.
-   TOK_TABLE_OR_COL
-  src1
-   key
-.
-   TOK_TABLE_OR_COL
-  src2
-   key
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   dest1
-  TOK_SELECT
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  src1
-   key
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  src2
-   value
-  TOK_WHERE
- and
-=
-   .
-  TOK_TABLE_OR_COL
- src1
-  ds
-   '2008-04-08'
-=
-   .
-  TOK_TABLE_OR_COL
- src1
-  hr
-   '12'
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1

http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/join_filters_overlap.q.out
--
diff --git a/ql/src/test/results/clientpositive/join_filters_overlap.q.out 
b/ql/src/test/results/clientpositive/join_filters_overlap.q.out
index dede6b7..ac00fd5 100644
--- a/ql/src/test/results/clientpositive/join_filters_overlap.q.out
+++ b/ql/src/test/results/clientpositive/join_filters_overlap.q.out
@@ -22,78 +22,6 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- overlap on a
 explain extended select * from a left outer join a b on (a.key=b.key AND 
a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 
AND c.value=60)
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_LEFTOUTERJOIN
- TOK_LEFTOUTERJOIN
-TOK_TABREF
-   TOK_TABNAME
-  a
-TOK_TABREF
-   TOK_TABNAME
-  a
-   b
-AND
-   AND
-  =
- .
-TOK_TABLE_OR_COL
-   a
-key
- .
-TOK_TABLE_OR_COL
-   b
-key
-  =
- .
-TOK_TABLE_OR_COL
-   a
-value
- 50
-   =
-  .
- TOK_TABLE_OR_COL
-b
- value
-  50
- TOK_TABREF
-TOK_TABNAME
-   a
-c
- AND
-AND
-   =
-  .
- TOK_TABLE_OR_COL
-a
- key
-  .
- TOK_TABLE_OR_COL
-c
- key
-   =
-  .
- TOK_TABLE_OR_COL
-a
- value
-  60
-=
-   .
-  TOK_TABLE_OR_COL
- c
-  value
-   60
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_ALLCOLREF
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -285,78 +213,6 @@ PREHOOK: type: QUERY
 POSTHOOK: query: -- overlap on b
 explain extended select * from a right outer join a b on (a.key=b.key AND 
a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 
AND c.value=60)
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_LEFTOUTERJOIN
- TOK_RIGHTOUTERJOIN
-TOK_TABREF
-   TOK_TABNAME
-  a
-TOK_TABREF
-   TOK_TABNAME
-  a
-   b
-AND
-   AND
-

[03/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out
--
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out 
b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out
index 3d1f22f..1365626 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out
@@ -142,88 +142,6 @@ from noop(on part_orc
   order by p_name
   )
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_PTBLFUNCTION
- noop
- TOK_TABREF
-TOK_TABNAME
-   part_orc
- TOK_PARTITIONINGSPEC
-TOK_DISTRIBUTEBY
-   TOK_TABLE_OR_COL
-  p_mfgr
-TOK_ORDERBY
-   TOK_TABSORTCOLNAMEASC
-  TOK_NULLS_FIRST
- TOK_TABLE_OR_COL
-p_name
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   p_mfgr
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   p_name
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   p_size
- TOK_SELEXPR
-TOK_FUNCTION
-   rank
-   TOK_WINDOWSPEC
-  TOK_PARTITIONINGSPEC
- TOK_DISTRIBUTEBY
-TOK_TABLE_OR_COL
-   p_mfgr
- TOK_ORDERBY
-TOK_TABSORTCOLNAMEASC
-   TOK_NULLS_FIRST
-  TOK_TABLE_OR_COL
- p_name
-r
- TOK_SELEXPR
-TOK_FUNCTION
-   dense_rank
-   TOK_WINDOWSPEC
-  TOK_PARTITIONINGSPEC
- TOK_DISTRIBUTEBY
-TOK_TABLE_OR_COL
-   p_mfgr
- TOK_ORDERBY
-TOK_TABSORTCOLNAMEASC
-   TOK_NULLS_FIRST
-  TOK_TABLE_OR_COL
- p_name
-dr
- TOK_SELEXPR
-TOK_FUNCTION
-   sum
-   TOK_TABLE_OR_COL
-  p_retailprice
-   TOK_WINDOWSPEC
-  TOK_PARTITIONINGSPEC
- TOK_DISTRIBUTEBY
-TOK_TABLE_OR_COL
-   p_mfgr
- TOK_ORDERBY
-TOK_TABSORTCOLNAMEASC
-   TOK_NULLS_FIRST
-  TOK_TABLE_OR_COL
- p_name
-  TOK_WINDOWRANGE
- preceding
-unbounded
- current
-s1
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -472,94 +390,6 @@ from noop (on (select p1.* from part_orc p1 join part_orc 
p2 on p1.p_partkey = p
 distribute by j.p_mfgr
 sort by j.p_name)
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_PTBLFUNCTION
- noop
- TOK_SUBQUERY
-TOK_QUERY
-   TOK_FROM
-  TOK_JOIN
- TOK_TABREF
-TOK_TABNAME
-   part_orc
-p1
- TOK_TABREF
-TOK_TABNAME
-   part_orc
-p2
- =
-.
-   TOK_TABLE_OR_COL
-  p1
-   p_partkey
-.
-   TOK_TABLE_OR_COL
-  p2
-   p_partkey
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_ALLCOLREF
-   TOK_TABNAME
-  p1
-j
- TOK_PARTITIONINGSPEC
-TOK_DISTRIBUTEBY
-   .
-  TOK_TABLE_OR_COL
- j
-  p_mfgr
-TOK_SORTBY
-   TOK_TABSORTCOLNAMEASC
-  TOK_NULLS_FIRST
- .
-TOK_TABLE_OR_COL
-   j
-p_name
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   p_mfgr
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   

[02/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/udf_reflect2.q.out
--
diff --git a/ql/src/test/results/clientpositive/udf_reflect2.q.out 
b/ql/src/test/results/clientpositive/udf_reflect2.q.out
index 6c22657..a862990 100644
--- a/ql/src/test/results/clientpositive/udf_reflect2.q.out
+++ b/ql/src/test/results/clientpositive/udf_reflect2.q.out
@@ -78,235 +78,6 @@ SELECT key,
reflect2(ts, "getTime")
 FROM (select cast(key as int) key, value, cast('2013-02-15 19:41:20' as 
timestamp) ts from src) a LIMIT 5
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_SUBQUERY
- TOK_QUERY
-TOK_FROM
-   TOK_TABREF
-  TOK_TABNAME
- src
-TOK_INSERT
-   TOK_DESTINATION
-  TOK_DIR
- TOK_TMP_FILE
-   TOK_SELECT
-  TOK_SELEXPR
- TOK_FUNCTION
-TOK_INT
-TOK_TABLE_OR_COL
-   key
- key
-  TOK_SELEXPR
- TOK_TABLE_OR_COL
-value
-  TOK_SELEXPR
- TOK_FUNCTION
-TOK_TIMESTAMP
-'2013-02-15 19:41:20'
- ts
- a
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  key
-   "byteValue"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  key
-   "shortValue"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  key
-   "intValue"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  key
-   "longValue"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  key
-   "floatValue"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  key
-   "doubleValue"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  key
-   "toString"
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   value
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "concat"
-   "_concat"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "contains"
-   "86"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "startsWith"
-   "v"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "endsWith"
-   "6"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "equals"
-   "val_86"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "equalsIgnoreCase"
-   "VAL_86"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "getBytes"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "indexOf"
-   "1"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "lastIndexOf"
-   "1"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "replace"
-   "val"
-   "VALUE"
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "substring"
-   1
- TOK_SELEXPR
-TOK_FUNCTION
-   reflect2
-   TOK_TABLE_OR_COL
-  value
-   "substring"
-   1
-

[13/18] hive git commit: HIVE-13533: Remove AST dump (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-04-25 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/6908198d/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out 
b/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
index 440466c..6ca9ea6 100644
--- a/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
@@ -148,271 +148,6 @@ inner join
 service_request_clean b
 on a.contact_event_id = b.cnctevn_id
 POSTHOOK: type: CREATETABLE_AS_SELECT
-ABSTRACT SYNTAX TREE:
-  
-TOK_CREATETABLE
-   TOK_TABNAME
-  ct_events1_test
-   TOK_LIKETABLE
-   TOK_QUERY
-  TOK_FROM
- TOK_JOIN
-TOK_SUBQUERY
-   TOK_QUERY
-  TOK_FROM
- TOK_TABREF
-TOK_TABNAME
-   default
-   ct_events_clean
-a
-  TOK_INSERT
- TOK_DESTINATION
-TOK_DIR
-   TOK_TMP_FILE
- TOK_SELECT
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  contact_event_id
-   contact_event_id
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  ce_create_dt
-   ce_create_dt
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  ce_end_dt
-   ce_end_dt
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  contact_type
-   contact_type
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  cnctevs_cd
-   cnctevs_cd
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  contact_mode
-   contact_mode
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  cntvnst_stts_cd
-   cntvnst_stts_cd
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  total_transfers
-   total_transfers
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- a
-  ce_notes
-   ce_notes
- TOK_WHERE
-TOK_FUNCTION
-   TOK_ISNOTNULL
-   .
-  TOK_TABLE_OR_COL
- a
-  contact_event_id
-   $hdt$_0
-TOK_SUBQUERY
-   TOK_QUERY
-  TOK_FROM
- TOK_TABREF
-TOK_TABNAME
-   default
-   service_request_clean
-b
-  TOK_INSERT
- TOK_DESTINATION
-TOK_DIR
-   TOK_TMP_FILE
- TOK_SELECT
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- b
-  cnctevn_id
-   cnctevn_id
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- b
-  svcrqst_id
-   svcrqst_id
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- b
-  cnctmd_cd
-   cnctmd_cd
-TOK_SELEXPR
-   .
-  TOK_TABLE_OR_COL
- b
-  

hive git commit: HIVE-13559: Pass exception to failure hooks (Jimmy Xiang, reviewed by Chaoyu Tang)

2016-04-25 Thread jxiang
Repository: hive
Updated Branches:
  refs/heads/master 413645d1b -> 37db169a8


HIVE-13559: Pass exception to failure hooks (Jimmy Xiang, reviewed by Chaoyu 
Tang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/37db169a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/37db169a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/37db169a

Branch: refs/heads/master
Commit: 37db169a844ad1217abcde67b1350d7064d2c005
Parents: 413645d
Author: Jimmy Xiang 
Authored: Wed Apr 20 11:43:22 2016 -0700
Committer: Jimmy Xiang 
Committed: Mon Apr 25 07:33:54 2016 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/Driver.java   | 9 +
 .../java/org/apache/hadoop/hive/ql/hooks/HookContext.java   | 9 +
 2 files changed, 14 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/37db169a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java 
b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 48fb060..9aefff8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1641,7 +1641,7 @@ public class Driver implements CommandProcessor {
 
   } else {
 setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk);
-invokeFailureHooks(perfLogger, hookContext);
+invokeFailureHooks(perfLogger, hookContext, result.getTaskError());
 SQLState = "08S01";
 console.printError(errorMessage);
 driverCxt.shutdown();
@@ -1677,7 +1677,7 @@ public class Driver implements CommandProcessor {
   if (driverCxt.isShutdown()) {
 SQLState = "HY008";
 errorMessage = "FAILED: Operation cancelled";
-invokeFailureHooks(perfLogger, hookContext);
+invokeFailureHooks(perfLogger, hookContext, null);
 console.printError(errorMessage);
 return 1000;
   }
@@ -1734,7 +1734,7 @@ public class Driver implements CommandProcessor {
   errorMessage = "FAILED: Hive Internal Error: " + 
Utilities.getNameMessage(e);
   if (hookContext != null) {
 try {
-  invokeFailureHooks(perfLogger, hookContext);
+  invokeFailureHooks(perfLogger, hookContext, e);
 } catch (Exception t) {
   LOG.warn("Failed to invoke failure hook", t);
 }
@@ -1813,9 +1813,10 @@ public class Driver implements CommandProcessor {
 }
   }
 
-  private void invokeFailureHooks(PerfLogger perfLogger, HookContext 
hookContext) throws Exception {
+  private void invokeFailureHooks(PerfLogger perfLogger, HookContext 
hookContext, Throwable exception) throws Exception {
 hookContext.setHookType(HookContext.HookType.ON_FAILURE_HOOK);
 hookContext.setErrorMessage(errorMessage);
+hookContext.setException(exception);
 // Get all the failure execution hooks and execute them.
 for (Hook ofh : getHooks(HiveConf.ConfVars.ONFAILUREHOOKS)) {
   perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.FAILURE_HOOK + 
ofh.getClass().getName());

http://git-wip-us.apache.org/repos/asf/hive/blob/37db169a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java 
b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
index 6fd1f66..8e1672f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
@@ -52,6 +52,7 @@ public class HookContext {
   private UserGroupInformation ugi;
   private HookType hookType;
   private String errorMessage;
+  private Throwable exception;
   final private Map inputPathToContentSummary;
   private final String ipAddress;
   private final String userName;
@@ -172,6 +173,14 @@ public class HookContext {
 return errorMessage;
   }
 
+  public void setException(Throwable exception) {
+this.exception = exception;
+  }
+
+  public Throwable getException() {
+return exception;
+  }
+
   public String getOperationName() {
 return queryPlan.getOperationName();
   }
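
A minimal sketch of how a failure hook might consume the accessor added above; this is illustrative only and not part of the commit. It assumes the existing org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface and a hypothetical hook class name (LoggingFailureHook) registered through hive.exec.failure.hooks; note that getException() may return null, e.g. on the "Operation cancelled" path in Driver above, which passes null.

package example.hooks;

import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;

// Hypothetical failure hook: logs the error message and, when present,
// the Throwable that HIVE-13559 now propagates into HookContext.
public class LoggingFailureHook implements ExecuteWithHookContext {
  @Override
  public void run(HookContext hookContext) throws Exception {
    String message = hookContext.getErrorMessage();
    Throwable cause = hookContext.getException();  // may be null, e.g. when the query was cancelled
    if (cause != null) {
      System.err.println("Query failed: " + message);
      cause.printStackTrace(System.err);
    } else {
      System.err.println("Query failed (no exception captured): " + message);
    }
  }
}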