hive git commit: HIVE-19488 : Enable CM root based on db parameter, identifying db as a source of replication (Mahesh Kumar Behera, reviewed by Sankar Hariappan)

2018-06-04 Thread sankarh
Repository: hive
Updated Branches:
  refs/heads/branch-3 bda5d51dd -> 752ba6824


HIVE-19488 : Enable CM root based on db parameter, identifying db as a source 
of replication (Mahesh Kumar Behera, reviewed by Sankar Hariappan)

Signed-off-by: Sankar Hariappan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/752ba682
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/752ba682
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/752ba682

Branch: refs/heads/branch-3
Commit: 752ba68249841b2949ba34574184cdf0ad7c1911
Parents: bda5d51
Author: Mahesh Kumar Behera 
Authored: Tue May 29 22:12:07 2018 +0530
Committer: Sankar Hariappan 
Committed: Tue Jun 5 11:09:02 2018 +0530

----------------------------------------------------------------------
 .../hive/metastore/TestReplChangeManager.java   |   3 +
 .../hadoop/hive/ql/parse/TestCopyUtils.java |   4 +-
 .../TestReplicationOnHDFSEncryptedZones.java|   4 +-
 .../hive/ql/parse/TestReplicationScenarios.java |  90 ++--
 .../TestReplicationScenariosAcidTables.java |   4 +-
 ...TestReplicationScenariosAcrossInstances.java |  18 ++-
 .../TestHiveAuthorizerCheckInvocation.java  |   4 +-
 .../compactor/TestCleanerWithReplication.java   |  34 +++--
 .../apache/hive/jdbc/TestJdbcWithMiniHS2.java   |   4 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   8 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java|   9 +-
 .../hive/ql/parse/MetaDataExportListener.java   |   6 +-
 .../ql/parse/ReplicationSemanticAnalyzer.java   |  23 ++-
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   |  10 +-
 .../clientnegative/repl_dump_requires_admin.q   |   2 +-
 .../clientnegative/repl_load_requires_admin.q   |   2 +-
 .../repl_dump_requires_admin.q.out  |   4 +-
 .../repl_load_requires_admin.q.out  |   4 +-
 .../hadoop/hive/metastore/HiveAlterHandler.java |  11 +-
 .../hadoop/hive/metastore/HiveMetaStore.java| 144 +++
 .../hive/metastore/ReplChangeManager.java   |  23 +++
 .../apache/hadoop/hive/metastore/Warehouse.java |  10 +-
 .../hadoop/hive/metastore/model/MDatabase.java  |  21 ++-
 23 files changed, 317 insertions(+), 125 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/752ba682/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
index e63250c..235bd11 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.ReplChangeManager.RecycleType;
+import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Partition;
@@ -119,6 +120,7 @@ public class TestReplChangeManager {
 client.dropDatabase(dbName, true, true);
 
 Database db = new Database();
+db.putToParameters(SOURCE_OF_REPLICATION, "1,2,3");
 db.setName(dbName);
 client.createDatabase(db);
 
@@ -204,6 +206,7 @@ public class TestReplChangeManager {
 client.dropDatabase(dbName, true, true);
 
 Database db = new Database();
+db.putToParameters(SOURCE_OF_REPLICATION, "1, 2, 3");
 db.setName(dbName);
 client.createDatabase(db);
 
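The gist of these test hunks: with HIVE-19488, change-management (CM) recycling is keyed off a database parameter, so a database must be tagged as a replication source before it is created. A minimal sketch of that tagging (assumptions: a reachable metastore; the URI and db name are placeholders; SOURCE_OF_REPLICATION is the db parameter imported above, i.e. "repl.source.for"):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Database;
import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;

public class ReplSourceTagging {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    conf.set("hive.metastore.uris", "thrift://localhost:9083"); // placeholder URI

    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    Database db = new Database();
    db.setName("repl_src_db"); // placeholder name
    // Mark the db as a source for replication policies "1,2,3"; with
    // HIVE-19488 only dbs carrying this parameter have dropped/overwritten
    // files recycled into the CM root.
    db.putToParameters(SOURCE_OF_REPLICATION, "1,2,3");
    client.createDatabase(db);
    client.close();
  }
}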

http://git-wip-us.apache.org/repos/asf/hive/blob/752ba682/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestCopyUtils.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestCopyUtils.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestCopyUtils.java
index f14b430..0e0a5cc 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestCopyUtils.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestCopyUtils.java
@@ -42,6 +42,7 @@ import java.util.Map;
 
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.shims.HadoopShims.MiniMrShim;
+import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
 
 public class TestCopyUtils {
   @Rule
@@ -110,7 +111,8 @@ public class TestCopyUtils {
 replV1BackwardCompat = primary.getReplivationV1CompatRule(new ArrayList<>());
 primaryDbName 

hive git commit: HIVE-19708: Repl copy retrying with cm path even if the failure is due to network issue (Mahesh Kumar Behera, reviewed by Sankar Hariappan)

2018-06-04 Thread sankarh
Repository: hive
Updated Branches:
  refs/heads/master f567a8231 -> b15c842b5


HIVE-19708: Repl copy retrying with cm path even if the failure is due to 
network issue (Mahesh Kumar Behera, reviewed by Sankar Hariappan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b15c842b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b15c842b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b15c842b

Branch: refs/heads/master
Commit: b15c842b53e5f1e3457ff8e6a422544471d3c71c
Parents: f567a82
Author: Sankar Hariappan 
Authored: Tue Jun 5 10:37:57 2018 +0530
Committer: Sankar Hariappan 
Committed: Tue Jun 5 10:37:57 2018 +0530

----------------------------------------------------------------------
 .../apache/hadoop/hive/common/FileUtils.java| 11 +++
 .../hadoop/hive/ql/parse/repl/CopyUtils.java| 78 +---
 .../ql/parse/repl/dump/io/FileOperations.java   | 48 ++--
 3 files changed, 104 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b15c842b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index 724752b..ec2f9f0 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -70,6 +70,8 @@ import org.slf4j.LoggerFactory;
 public final class FileUtils {
  private static final Logger LOG = LoggerFactory.getLogger(FileUtils.class.getName());
   private static final Random random = new Random();
+  public static final int MAX_IO_ERROR_RETRY = 5;
+  public static final int IO_ERROR_SLEEP_TIME = 100;
 
   public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
 @Override
@@ -1054,4 +1056,13 @@ public final class FileUtils {
   bb.position(bb.position() + fullLen);
 }
   }
+
+  /**
+   * Returns the incremented sleep time in milliseconds.
+   * @param repeatNum number of retries done so far.
+   */
+  public static int getSleepTime(int repeatNum) {
+    return IO_ERROR_SLEEP_TIME * (int)(Math.pow(2.0, repeatNum));
+  }
+
 }
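A quick check of the backoff arithmetic added above (IO_ERROR_SLEEP_TIME = 100, MAX_IO_ERROR_RETRY = 5): getSleepTime doubles the wait on each retry. A minimal sketch:

import org.apache.hadoop.hive.common.FileUtils;

public class BackoffCheck {
  public static void main(String[] args) {
    // Prints the schedule produced by FileUtils.getSleepTime:
    // retry 0 sleeps 100 ms, then 200, 400, 800, 1600.
    for (int repeatNum = 0; repeatNum < FileUtils.MAX_IO_ERROR_RETRY; repeatNum++) {
      System.out.println("retry " + repeatNum + " sleeps " + FileUtils.getSleepTime(repeatNum) + " ms");
    }
  }
}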

http://git-wip-us.apache.org/repos/asf/hive/blob/b15c842b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/CopyUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/CopyUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/CopyUtils.java
index 2557121..79b4652 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/CopyUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/CopyUtils.java
@@ -33,6 +33,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.security.auth.login.LoginException;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
@@ -96,21 +97,54 @@ public class CopyUtils {
 boolean isCopyError = false;
 List<Path> pathList = Lists.transform(srcFileList, ReplChangeManager.FileInfo::getEffectivePath);
 while (!pathList.isEmpty() && (repeat < MAX_COPY_RETRY)) {
-  LOG.info("Attempt: " + (repeat+1) + ". Copying files: " + pathList);
   try {
-isCopyError = false;
+// if it's retrying, first regenerate the path list.
+if (repeat > 0) {
+  pathList = getFilesToRetry(sourceFs, srcFileList, destinationFs, destination, isCopyError);
+  if (pathList.isEmpty()) {
+    // all files were copied successfully on the last try, so we can break here.
+    break;
+  }
+}
+
+LOG.info("Attempt: " + (repeat+1) + ". Copying files: " + pathList);
+
+// if an exception happens during doCopyOnce, getFilesToRetry must be called with copy error set to true on retry.
+isCopyError = true;
 doCopyOnce(sourceFs, pathList, destinationFs, destination, useRegularCopy);
+
+// if an exception happens after doCopyOnce, getFilesToRetry must be called with copy error set to false on retry.
+isCopyError = false;
   } catch (IOException e) {
 // If copy fails, fall through the retry logic
-isCopyError = true;
+LOG.info("file operation failed", e);
+
+if (repeat >= (MAX_COPY_RETRY - 1)) {
+  // no need to wait in the last iteration
+  break;
+}
+
+if (!(e instanceof FileNotFoundException)) {
+  int sleepTime = FileUtils.getSleepTime(repeat);
+  LOG.info("Sleep for " + sleepTime + " milliseconds before retry " + (repeat+1));
+  try {
+    Thread.sleep(sleepTime);
+  } catch (InterruptedException timerEx) {
+LOG.info("sleep int

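Since the tail of this message is truncated, here is the shape of the new retry loop as a standalone sketch (copyOnce() and filesToRetry() are hypothetical stand-ins for doCopyOnce()/getFilesToRetry(); MAX_COPY_RETRY stands in for the real CopyUtils constant): non-FileNotFound IOExceptions back off via FileUtils.getSleepTime before retrying, while FileNotFoundException retries immediately so the regenerated path list can substitute the CM copy for a vanished source file; the point of HIVE-19708 is that the CM fallback no longer kicks in for plain network failures.

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;

abstract class RetryShapeSketch {
  static final int MAX_COPY_RETRY = 5; // illustrative; the real constant lives in CopyUtils

  abstract void copyOnce(List<Path> files) throws IOException;          // stand-in for doCopyOnce
  abstract List<Path> filesToRetry(List<Path> files, boolean midCopy);  // stand-in for getFilesToRetry

  void copyWithRetry(List<Path> allFiles) throws InterruptedException {
    List<Path> pending = allFiles;
    boolean failedMidCopy = false;
    for (int attempt = 0; attempt < MAX_COPY_RETRY && !pending.isEmpty(); attempt++) {
      try {
        if (attempt > 0) {
          // Recompute the worklist: after a mid-copy failure we must check
          // which files actually reached the destination (and swap in CM
          // paths for sources that no longer exist).
          pending = filesToRetry(pending, failedMidCopy);
          if (pending.isEmpty()) {
            break; // everything landed on the previous attempt
          }
        }
        failedMidCopy = true;   // set before copying so a thrown IOException is remembered
        copyOnce(pending);
        failedMidCopy = false;  // the copy ran to completion
      } catch (IOException e) {
        if (attempt >= MAX_COPY_RETRY - 1) {
          break; // out of attempts, no point sleeping
        }
        if (!(e instanceof FileNotFoundException)) {
          // Transient (e.g. network) failure: exponential backoff first.
          Thread.sleep(FileUtils.getSleepTime(attempt));
        }
        // FileNotFoundException: retry immediately with regenerated paths.
      }
    }
  }
}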
[4/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/bda5d51d/ql/src/test/results/clientpositive/llap/subquery_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_views.q.out b/ql/src/test/results/clientpositive/llap/subquery_views.q.out
index 2c85309..4609668 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_views.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_views.q.out
@@ -132,33 +132,33 @@ STAGE PLANS:
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
 Statistics: Num rows: 166 Data size: 29548 Basic 
stats: COMPLETE Column stats: COMPLETE
   Filter Operator
-predicate: ((value > 'val_11') and key is not null) (type: 
boolean)
-Statistics: Num rows: 166 Data size: 29548 Basic stats: 
COMPLETE Column stats: COMPLETE
+predicate: ((key < '11') and (value > 'val_11')) (type: 
boolean)
+Statistics: Num rows: 55 Data size: 9790 Basic stats: 
COMPLETE Column stats: COMPLETE
 Group By Operator
   aggregations: count(), count(key)
   keys: key (type: string), value (type: string)
   mode: hash
   outputColumnNames: _col0, _col1, _col2, _col3
-  Statistics: Num rows: 83 Data size: 16102 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Statistics: Num rows: 27 Data size: 5238 Basic stats: 
COMPLETE Column stats: COMPLETE
   Reduce Output Operator
 key expressions: _col0 (type: string), _col1 (type: 
string)
 sort order: ++
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-Statistics: Num rows: 83 Data size: 16102 Basic stats: 
COMPLETE Column stats: COMPLETE
+Statistics: Num rows: 27 Data size: 5238 Basic stats: 
COMPLETE Column stats: COMPLETE
 value expressions: _col2 (type: bigint), _col3 (type: 
bigint)
   Filter Operator
-predicate: ((value > 'val_11') and key is not null) (type: 
boolean)
-Statistics: Num rows: 166 Data size: 29548 Basic stats: 
COMPLETE Column stats: COMPLETE
+predicate: ((key < '11') and (value > 'val_11')) (type: 
boolean)
+Statistics: Num rows: 55 Data size: 9790 Basic stats: 
COMPLETE Column stats: COMPLETE
 Group By Operator
   keys: key (type: string), value (type: string)
   mode: hash
   outputColumnNames: _col0, _col1
-  Statistics: Num rows: 83 Data size: 14774 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Statistics: Num rows: 27 Data size: 4806 Basic stats: 
COMPLETE Column stats: COMPLETE
   Reduce Output Operator
 key expressions: _col0 (type: string), _col1 (type: 
string)
 sort order: ++
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-Statistics: Num rows: 83 Data size: 14774 Basic stats: 
COMPLETE Column stats: COMPLETE
+Statistics: Num rows: 27 Data size: 4806 Basic stats: 
COMPLETE Column stats: COMPLETE
   Filter Operator
 predicate: (key < '11') (type: boolean)
 Statistics: Num rows: 166 Data size: 29548 Basic stats: 
COMPLETE Column stats: COMPLETE
@@ -172,19 +172,19 @@ STAGE PLANS:
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
 Statistics: Num rows: 166 Data size: 29548 Basic 
stats: COMPLETE Column stats: COMPLETE
   Filter Operator
-predicate: ((value > 'val_11') and key is not null) (type: 
boolean)
-Statistics: Num rows: 166 Data size: 29548 Basic stats: 
COMPLETE Column stats: COMPLETE
+predicate: ((key < '11') and (value > 'val_11')) (type: 
boolean)
+Statistics: Num rows: 55 Data size: 9790 Basic stats: 
COMPLETE Column stats: COMPLETE
 Group By Operator
   aggregations: count(), count(key)
   keys: key (type: string), value (type: string)
   mode: hash
   outputColumnNames: _col0, _col1, _col2, _col3
-  Statistics: Num rows: 83 Data size: 16102 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Statistics: Num rows: 27 Data size: 5238 Basic stats: 
COMPLETE Column stats: COMPLETE
   Reduce Output

[7/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/bda5d51d/ql/src/test/results/clientpositive/llap/subquery_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_in.q.out b/ql/src/test/results/clientpositive/llap/subquery_in.q.out
index 456e324..7dd64af 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_in.q.out
@@ -2106,7 +2106,7 @@ STAGE PLANS:
   alias: p
   Statistics: Num rows: 26 Data size: 3354 Basic stats: 
COMPLETE Column stats: COMPLETE
   Filter Operator
-predicate: p_name is not null (type: boolean)
+predicate: (p_name is not null and p_partkey is not null 
and p_size is not null) (type: boolean)
 Statistics: Num rows: 26 Data size: 3354 Basic stats: 
COMPLETE Column stats: COMPLETE
 Select Operator
   expressions: p_partkey (type: int), p_name (type: 
string), p_size (type: int)
@@ -2495,7 +2495,7 @@ STAGE PLANS:
   alias: part
   Statistics: Num rows: 26 Data size: 2808 Basic stats: 
COMPLETE Column stats: COMPLETE
   Filter Operator
-predicate: p_type is not null (type: boolean)
+predicate: (p_size is not null and p_type is not null) 
(type: boolean)
 Statistics: Num rows: 26 Data size: 2808 Basic stats: 
COMPLETE Column stats: COMPLETE
 Select Operator
   expressions: p_type (type: string), p_size (type: int)
@@ -2514,12 +2514,12 @@ STAGE PLANS:
 TableScan
   alias: part
   Statistics: Num rows: 26 Data size: 104 Basic stats: 
COMPLETE Column stats: COMPLETE
-  Select Operator
-expressions: (p_size + 1) (type: int)
-outputColumnNames: _col0
+  Filter Operator
+predicate: p_size is not null (type: boolean)
 Statistics: Num rows: 26 Data size: 104 Basic stats: 
COMPLETE Column stats: COMPLETE
-Filter Operator
-  predicate: _col0 is not null (type: boolean)
+Select Operator
+  expressions: (p_size + 1) (type: int)
+  outputColumnNames: _col0
   Statistics: Num rows: 26 Data size: 104 Basic stats: 
COMPLETE Column stats: COMPLETE
   Group By Operator
 keys: _col0 (type: int)
@@ -4331,17 +4331,17 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col10
 Statistics: Num rows: 26 Data size: 16214 Basic stats: 
COMPLETE Column stats: COMPLETE
 Filter Operator
-  predicate: (sq_count_check(_col10, true) > 0) (type: boolean)
+  predicate: (sq_count_check(CASE WHEN (_col10 is null) THEN 
(0) ELSE (_col10) END, true) > 0) (type: boolean)
   Statistics: Num rows: 8 Data size: 4992 Basic stats: 
COMPLETE Column stats: COMPLETE
   Select Operator
 expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: 
int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8
-Statistics: Num rows: 8 Data size: 4992 Basic stats: 
COMPLETE Column stats: COMPLETE
+Statistics: Num rows: 8 Data size: 4952 Basic stats: 
COMPLETE Column stats: COMPLETE
 Reduce Output Operator
   key expressions: _col4 (type: string), UDFToLong(_col5) 
(type: bigint)
   sort order: ++
   Map-reduce partition columns: _col4 (type: string), 
UDFToLong(_col5) (type: bigint)
-  Statistics: Num rows: 8 Data size: 4992 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Statistics: Num rows: 8 Data size: 4952 Basic stats: 
COMPLETE Column stats: COMPLETE
   value expressions: _col0 (type: int), _col1 (type: 
string), _col2 (type: string), _col3 (type: string), _col5 (type: int), _col6 
(type: string), _col7 (type: double), _col8 (type: string)
 Reducer 3 
 Execution mode: llap
@@ -4837,17 +4837,17 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col10
 Statistics: Num rows: 26 Data size: 16214 Basic stats: 
COMPLETE Column stats: COMPLETE
 Filter Operator
-  predicate: (sq_count_check(_col10, true) > 0) (ty

[8/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus 
Camacho Rodriguez via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bda5d51d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bda5d51d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bda5d51d

Branch: refs/heads/branch-3
Commit: bda5d51ddf412a334909215dd5586dc9f43a8749
Parents: e5f7714
Author: Jesus Camacho Rodriguez 
Authored: Sun Apr 29 14:14:00 2018 -0700
Committer: Ashutosh Chauhan 
Committed: Mon Jun 4 21:01:19 2018 -0700

----------------------------------------------------------------------
 .../ql/optimizer/calcite/HiveRelOptUtil.java| 113 +++
 .../calcite/cost/HiveVolcanoPlanner.java|   5 +-
 .../calcite/rules/HiveRelDecorrelator.java  | 442 +--
 .../clientpositive/llap/explainuser_1.q.out | 112 ++-
 .../llap/groupby_groupingset_bug.q.out  |  16 +-
 .../results/clientpositive/llap/lineage3.q.out  |   4 +-
 .../clientpositive/llap/subquery_in.q.out   | 100 ++-
 .../llap/subquery_in_having.q.out   |  60 +-
 .../clientpositive/llap/subquery_multi.q.out| 212 --
 .../clientpositive/llap/subquery_notin.q.out| 738 ++-
 .../clientpositive/llap/subquery_scalar.q.out   | 545 +++---
 .../clientpositive/llap/subquery_select.q.out   | 100 ++-
 .../clientpositive/llap/subquery_views.q.out|  93 +--
 .../clientpositive/perf/spark/query1.q.out  |  16 +-
 .../clientpositive/perf/spark/query30.q.out |  18 +-
 .../clientpositive/perf/spark/query32.q.out |  18 +-
 .../clientpositive/perf/spark/query6.q.out  |  22 +-
 .../clientpositive/perf/spark/query81.q.out |  18 +-
 .../clientpositive/perf/spark/query92.q.out |  18 +-
 .../clientpositive/perf/tez/query1.q.out|   8 +-
 .../clientpositive/perf/tez/query30.q.out   |  10 +-
 .../clientpositive/perf/tez/query32.q.out   |   8 +-
 .../clientpositive/perf/tez/query6.q.out|  12 +-
 .../clientpositive/perf/tez/query81.q.out   |  50 +-
 .../clientpositive/perf/tez/query92.q.out   |   8 +-
 .../spark/spark_explainuser_1.q.out | 120 +--
 .../clientpositive/spark/subquery_in.q.out  |  84 +--
 .../clientpositive/spark/subquery_multi.q.out   |  87 +--
 .../clientpositive/spark/subquery_notin.q.out   | 512 ++---
 .../clientpositive/spark/subquery_scalar.q.out  | 537 +++---
 .../clientpositive/spark/subquery_select.q.out  |  90 +--
 .../clientpositive/spark/subquery_views.q.out   |  85 ++-
 .../clientpositive/subquery_notexists.q.out |  42 +-
 .../clientpositive/subquery_notin_having.q.out  |  43 +-
 .../subquery_unqualcolumnrefs.q.out |  25 +-
 35 files changed, 2233 insertions(+), 2138 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bda5d51d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
index 50fbb78..268284a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
@@ -21,13 +21,19 @@ import java.util.AbstractList;
 import java.util.ArrayList;
 import java.util.List;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.AggregateCall;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.RelFactories;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexFieldAccess;
+import org.apache.calcite.rex.RexInputRef;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.sql.SqlKind;
@@ -347,4 +353,111 @@ public class HiveRelOptUtil extends RelOptUtil {
 }, true, relBuilder);
   }
 
+  public static RexNode splitCorrelatedFilterCondition(
+  Filter filter,
+  List joinKeys,
+  List correlatedJoinKeys,
+  boolean extractCorrelatedFieldAccess) {
+final List<RexNode> nonEquiList = new ArrayList<>();
+
+splitCorrelatedFilterCondition(
+filter,
+filter.getCondition(),
+joinKeys,
+correlatedJoinKeys,
+nonEquiList,
+extractCorrelatedFieldAccess);
+
+// Convert the remainders into a list that is AND'ed together.
+return RexUtil.composeConjunction(
+ 

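The tail of this hunk is cut off by the archive, but the shape it implements is a standard one: walk the filter's conjunction, pull out the conjuncts that can serve as (correlated) equi-join keys, and AND the leftovers back into a residual predicate via RexUtil.composeConjunction. A toy illustration of that split-and-recompose shape on plain strings (deliberately not the Calcite API; all names are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

public class SplitConjunction {
  // Partition conjuncts into join keys and a residual filter, mirroring
  // splitCorrelatedFilterCondition above: extracted keys go to joinKeys,
  // everything else is AND'ed back together.
  static String split(List<String> conjuncts, Predicate<String> isJoinKey, List<String> joinKeys) {
    List<String> nonEquiList = new ArrayList<>();
    for (String conjunct : conjuncts) {
      if (isJoinKey.test(conjunct)) {
        joinKeys.add(conjunct);     // usable as an equi-join key
      } else {
        nonEquiList.add(conjunct);  // stays behind as a filter
      }
    }
    return String.join(" AND ", nonEquiList);
  }

  public static void main(String[] args) {
    List<String> joinKeys = new ArrayList<>();
    String residual = split(
        List.of("a = corr.x", "b = corr.y", "c > 10"),
        conjunct -> conjunct.contains("= corr."), joinKeys);
    // joinKeys: [a = corr.x, b = corr.y]; residual: c > 10
    System.out.println(joinKeys + " / " + residual);
  }
}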
[6/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/bda5d51d/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
index d72e8c3..5eabc7d 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
@@ -388,7 +388,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col8
 Statistics: Num rows: 26 Data size: 5994 Basic stats: COMPLETE 
Column stats: COMPLETE
 Filter Operator
-  predicate: (not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN 
(_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 
is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) 
(type: boolean)
+  predicate: CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 
is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) 
THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean)
   Statistics: Num rows: 13 Data size: 3007 Basic stats: 
COMPLETE Column stats: COMPLETE
   Select Operator
 expressions: _col1 (type: string), _col0 (type: string), 
_col2 (type: int)
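The predicate change above is a De Morgan-style rewrite: rather than negating the whole CASE, each THEN branch is flipped (false to true and vice versa) while the null branch stays null, preserving NOT IN's three-valued semantics. A small self-check of that equivalence (Boolean with null standing in for SQL NULL; branch names are illustrative, not from the plan):

import java.util.Objects;

public class NotInCaseRewrite {
  // SQL-style NOT over three-valued logic: NOT NULL stays NULL.
  static Boolean not(Boolean b) {
    return b == null ? null : !b;
  }

  // Original form: not(CASE WHEN countZero THEN false WHEN countNull THEN false
  // WHEN matched THEN true WHEN keyNull THEN null WHEN partial THEN true ELSE false END)
  static Boolean original(boolean countZero, boolean countNull, boolean matched,
                          boolean keyNull, boolean partial) {
    Boolean caseValue = countZero ? Boolean.FALSE
        : countNull ? Boolean.FALSE
        : matched ? Boolean.TRUE
        : keyNull ? null
        : partial ? Boolean.TRUE
        : Boolean.FALSE;
    return not(caseValue);
  }

  // Rewritten form: the negation pushed into each branch, null left untouched.
  static Boolean rewritten(boolean countZero, boolean countNull, boolean matched,
                           boolean keyNull, boolean partial) {
    return countZero ? Boolean.TRUE
        : countNull ? Boolean.TRUE
        : matched ? Boolean.FALSE
        : keyNull ? null
        : partial ? Boolean.FALSE
        : Boolean.TRUE;
  }

  public static void main(String[] args) {
    // Exhaustively compare the two forms over all branch combinations.
    for (int mask = 0; mask < 32; mask++) {
      boolean a = (mask & 1) != 0, b = (mask & 2) != 0, c = (mask & 4) != 0,
              d = (mask & 8) != 0, e = (mask & 16) != 0;
      if (!Objects.equals(original(a, b, c, d, e), rewritten(a, b, c, d, e))) {
        throw new AssertionError("mismatch at mask " + mask);
      }
    }
    System.out.println("both forms agree on all 32 cases");
  }
}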
@@ -492,7 +492,7 @@ STAGE PLANS:
   isPivotResult: true
   Statistics: Num rows: 26 Data size: 12766 Basic stats: 
COMPLETE Column stats: COMPLETE
   Filter Operator
-predicate: (rank_window_0 <= 2) (type: boolean)
+predicate: ((rank_window_0 <= 2) and _col1 is not null) 
(type: boolean)
 Statistics: Num rows: 8 Data size: 3928 Basic stats: 
COMPLETE Column stats: COMPLETE
 Select Operator
   expressions: _col1 (type: string), _col2 (type: string)
@@ -516,19 +516,16 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 4 Data size: 876 Basic stats: COMPLETE 
Column stats: COMPLETE
-Filter Operator
-  predicate: _col0 is not null (type: boolean)
-  Statistics: Num rows: 4 Data size: 876 Basic stats: COMPLETE 
Column stats: COMPLETE
-  Select Operator
-expressions: _col0 (type: string), _col1 (type: string), 
true (type: boolean)
-outputColumnNames: _col0, _col1, _col2
+Select Operator
+  expressions: _col0 (type: string), _col1 (type: string), 
true (type: boolean)
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 4 Data size: 892 Basic stats: COMPLETE 
Column stats: COMPLETE
+  Reduce Output Operator
+key expressions: _col0 (type: string), _col1 (type: string)
+sort order: ++
+Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
 Statistics: Num rows: 4 Data size: 892 Basic stats: 
COMPLETE Column stats: COMPLETE
-Reduce Output Operator
-  key expressions: _col0 (type: string), _col1 (type: 
string)
-  sort order: ++
-  Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-  Statistics: Num rows: 4 Data size: 892 Basic stats: 
COMPLETE Column stats: COMPLETE
-  value expressions: _col2 (type: boolean)
+value expressions: _col2 (type: boolean)
 
   Stage: Stage-0
 Fetch Operator
@@ -932,17 +929,17 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col4
 Statistics: Num rows: 26 Data size: 5886 Basic stats: COMPLETE 
Column stats: COMPLETE
 Filter Operator
-  predicate: (sq_count_check(_col4, true) > 0) (type: boolean)
+  predicate: (sq_count_check(CASE WHEN (_col4 is null) THEN 
(0) ELSE (_col4) END, true) > 0) (type: boolean)
   Statistics: Num rows: 8 Data size: 1816 Basic stats: 
COMPLETE Column stats: COMPLETE
   Select Operator
 expressions: _col0 (type: string), _col1 (type: string), 
_col2 (type: int)
 outputColumnNames: _col0, _col1, _col2
-Statistics: Num rows: 8 Data size: 1816 Basic stats: 
COMPLETE Column stats: COMPLETE
+Statistics: Num rows: 8 Data size: 1784 Basic stats: 
COMPLETE Column stats: COMPLETE
 Reduce Output Operator
   key expressions: _col1 (type: string)
   sort order: +
 

[1/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/branch-3 e5f7714ac -> bda5d51dd


http://git-wip-us.apache.org/repos/asf/hive/blob/bda5d51d/ql/src/test/results/clientpositive/spark/subquery_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_views.q.out b/ql/src/test/results/clientpositive/spark/subquery_views.q.out
index af7ce71..572fe2e 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_views.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_views.q.out
@@ -160,19 +160,19 @@ STAGE PLANS:
   alias: a
   Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
   Filter Operator
-predicate: ((value > 'val_11') and key is not null) (type: 
boolean)
-Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+predicate: ((key < '11') and (value > 'val_11')) (type: 
boolean)
+Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
   aggregations: count(), count(key)
   keys: key (type: string), value (type: string)
   mode: hash
   outputColumnNames: _col0, _col1, _col2, _col3
-  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
   Reduce Output Operator
 key expressions: _col0 (type: string), _col1 (type: 
string)
 sort order: ++
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
 value expressions: _col2 (type: bigint), _col3 (type: 
bigint)
 Execution mode: vectorized
 Map 15 
@@ -181,18 +181,18 @@ STAGE PLANS:
   alias: a
   Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
   Filter Operator
-predicate: ((value > 'val_11') and key is not null) (type: 
boolean)
-Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+predicate: ((key < '11') and (value > 'val_11')) (type: 
boolean)
+Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
   keys: key (type: string), value (type: string)
   mode: hash
   outputColumnNames: _col0, _col1
-  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
   Reduce Output Operator
 key expressions: _col0 (type: string), _col1 (type: 
string)
 sort order: ++
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
 Execution mode: vectorized
 Map 18 
 Map Operator Tree:
@@ -201,20 +201,19 @@ STAGE PLANS:
   properties:
 insideView TRUE
   Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: key (type: string)
-outputColumnNames: key
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+  Filter Operator
+predicate: (key < '11') (type: boolean)
+Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
   keys: key (type: string)
   mode: hash
   outputColumnNames: _col0
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
   Reduce Output Operator
 key expressions: _col0 (type: string)
 

[3/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/bda5d51d/ql/src/test/results/clientpositive/spark/subquery_multi.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_multi.q.out b/ql/src/test/results/clientpositive/spark/subquery_multi.q.out
index 4b48a59..4224c67 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_multi.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_multi.q.out
@@ -1691,7 +1691,7 @@ STAGE PLANS:
   alias: part
   Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
   Filter Operator
-predicate: (p_brand is not null and p_type is not null) 
(type: boolean)
+predicate: (p_brand is not null and p_container is not 
null and p_type is not null) (type: boolean)
 Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
 Select Operator
   expressions: p_brand (type: string), p_type (type: 
string), p_container (type: string)
@@ -1820,19 +1820,16 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: _col0 is not null (type: boolean)
+Select Operator
+  expressions: _col0 (type: string), _col1 (type: string), 
true (type: boolean)
+  outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 14 Data size: 1730 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: _col0 (type: string), _col1 (type: string), 
true (type: boolean)
-outputColumnNames: _col0, _col1, _col2
+  Reduce Output Operator
+key expressions: _col0 (type: string), _col1 (type: string)
+sort order: ++
+Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
 Statistics: Num rows: 14 Data size: 1730 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string), _col1 (type: 
string)
-  sort order: ++
-  Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-  Statistics: Num rows: 14 Data size: 1730 Basic stats: 
COMPLETE Column stats: NONE
-  value expressions: _col2 (type: boolean)
+value expressions: _col2 (type: boolean)
 Reducer 2 
 Reduce Operator Tree:
   Join Operator
@@ -1876,15 +1873,15 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col10, _col11, _col14
 Statistics: Num rows: 33 Data size: 4187 Basic stats: COMPLETE 
Column stats: NONE
 Filter Operator
-  predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN 
(_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 
is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) 
(type: boolean)
-  Statistics: Num rows: 17 Data size: 2156 Basic stats: 
COMPLETE Column stats: NONE
+  predicate: CASE WHEN ((_col10 = 0L)) THEN (true) WHEN 
(_col10 is null) THEN (true) WHEN (_col14 is not null) THEN (false) WHEN (_col3 
is null) THEN (null) WHEN ((_col11 < _col10)) THEN (false) ELSE (true) END 
(type: boolean)
+  Statistics: Num rows: 16 Data size: 2030 Basic stats: 
COMPLETE Column stats: NONE
   Select Operator
 expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: 
int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8
-Statistics: Num rows: 17 Data size: 2156 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 16 Data size: 2030 Basic stats: 
COMPLETE Column stats: NONE
 File Output Operator
   compressed: false
-  Statistics: Num rows: 17 Data size: 2156 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 16 Data size: 2030 Basic stats: 
COMPLETE Column stats: NONE
   table:
   input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@

[5/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/bda5d51d/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
index d49804b..5e24ae6 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
@@ -1326,16 +1326,19 @@ STAGE PLANS:
 TableScan
   alias: e
   Statistics: Num rows: 26 Data size: 3250 Basic stats: 
COMPLETE Column stats: COMPLETE
-  Select Operator
-expressions: p_name (type: string), p_size (type: int)
-outputColumnNames: _col0, _col1
+  Filter Operator
+predicate: p_name is not null (type: boolean)
 Statistics: Num rows: 26 Data size: 3250 Basic stats: 
COMPLETE Column stats: COMPLETE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
+Select Operator
+  expressions: p_name (type: string), p_size (type: int)
+  outputColumnNames: _col0, _col1
   Statistics: Num rows: 26 Data size: 3250 Basic stats: 
COMPLETE Column stats: COMPLETE
-  value expressions: _col1 (type: int)
+  Reduce Output Operator
+key expressions: _col0 (type: string)
+sort order: +
+Map-reduce partition columns: _col0 (type: string)
+Statistics: Num rows: 26 Data size: 3250 Basic stats: 
COMPLETE Column stats: COMPLETE
+value expressions: _col1 (type: int)
 Execution mode: vectorized, llap
 LLAP IO: no inputs
 Map 4 
@@ -1368,9 +1371,9 @@ STAGE PLANS:
  Inner Join 0 to 1
 keys:
   0 _col0 (type: string)
-  1 _col2 (type: string)
-outputColumnNames: _col1, _col2, _col3
-residual filter predicates: {((_col1 + 100) < CASE WHEN (_col3 
is null) THEN (null) ELSE (_col2) END)}
+  1 _col1 (type: string)
+outputColumnNames: _col1, _col2
+residual filter predicates: {((_col1 + 100) < _col2)}
 Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE 
Column stats: COMPLETE
 Select Operator
   Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE 
Column stats: COMPLETE
@@ -1408,15 +1411,15 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 13 Data size: 1625 Basic stats: COMPLETE 
Column stats: COMPLETE
 Select Operator
-  expressions: _col1 (type: int), true (type: boolean), _col0 
(type: string)
-  outputColumnNames: _col0, _col1, _col2
-  Statistics: Num rows: 13 Data size: 1677 Basic stats: 
COMPLETE Column stats: COMPLETE
+  expressions: _col1 (type: int), _col0 (type: string)
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 13 Data size: 1625 Basic stats: 
COMPLETE Column stats: COMPLETE
   Reduce Output Operator
-key expressions: _col2 (type: string)
+key expressions: _col1 (type: string)
 sort order: +
-Map-reduce partition columns: _col2 (type: string)
-Statistics: Num rows: 13 Data size: 1677 Basic stats: 
COMPLETE Column stats: COMPLETE
-value expressions: _col0 (type: int), _col1 (type: boolean)
+Map-reduce partition columns: _col1 (type: string)
+Statistics: Num rows: 13 Data size: 1625 Basic stats: 
COMPLETE Column stats: COMPLETE
+value expressions: _col0 (type: int)
 
   Stage: Stage-0
 Fetch Operator
@@ -1456,15 +1459,18 @@ STAGE PLANS:
 TableScan
   alias: e
   Statistics: Num rows: 26 Data size: 3146 Basic stats: 
COMPLETE Column stats: COMPLETE
-  Select Operator
-expressions: p_name (type: string)
-outputColumnNames: _col0
+  Filter Operator
+predicate: p_name is not null (type: boolean)
 Statistics: Num rows: 26 Data size: 3146 Basic stats: 
COMPLETE Column stats: COMPLETE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort o

[2/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/bda5d51d/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out b/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out
index 5b3ad71..b347231 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out
@@ -1326,16 +1326,19 @@ STAGE PLANS:
 TableScan
   alias: e
   Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: p_name (type: string), p_size (type: int)
-outputColumnNames: _col0, _col1
+  Filter Operator
+predicate: p_name is not null (type: boolean)
 Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
+Select Operator
+  expressions: p_name (type: string), p_size (type: int)
+  outputColumnNames: _col0, _col1
   Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
-  value expressions: _col1 (type: int)
+  Reduce Output Operator
+key expressions: _col0 (type: string)
+sort order: +
+Map-reduce partition columns: _col0 (type: string)
+Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
+value expressions: _col1 (type: int)
 Execution mode: vectorized
 Map 4 
 Map Operator Tree:
@@ -1365,11 +1368,11 @@ STAGE PLANS:
  Inner Join 0 to 1
 keys:
   0 _col0 (type: string)
-  1 _col2 (type: string)
-outputColumnNames: _col1, _col2, _col3
+  1 _col1 (type: string)
+outputColumnNames: _col1, _col2
 Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE 
Column stats: NONE
 Filter Operator
-  predicate: ((_col1 + 100) < CASE WHEN (_col3 is null) THEN 
(null) ELSE (_col2) END) (type: boolean)
+  predicate: ((_col1 + 100) < _col2) (type: boolean)
   Statistics: Num rows: 9 Data size: 1112 Basic stats: 
COMPLETE Column stats: NONE
   Select Operator
 Statistics: Num rows: 9 Data size: 1112 Basic stats: 
COMPLETE Column stats: NONE
@@ -1407,15 +1410,15 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE 
Column stats: NONE
 Select Operator
-  expressions: _col1 (type: int), true (type: boolean), _col0 
(type: string)
-  outputColumnNames: _col0, _col1, _col2
+  expressions: _col1 (type: int), _col0 (type: string)
+  outputColumnNames: _col0, _col1
   Statistics: Num rows: 13 Data size: 1573 Basic stats: 
COMPLETE Column stats: NONE
   Reduce Output Operator
-key expressions: _col2 (type: string)
+key expressions: _col1 (type: string)
 sort order: +
-Map-reduce partition columns: _col2 (type: string)
+Map-reduce partition columns: _col1 (type: string)
 Statistics: Num rows: 13 Data size: 1573 Basic stats: 
COMPLETE Column stats: NONE
-value expressions: _col0 (type: int), _col1 (type: boolean)
+value expressions: _col0 (type: int)
 
   Stage: Stage-0
 Fetch Operator
@@ -1454,15 +1457,18 @@ STAGE PLANS:
 TableScan
   alias: e
   Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: p_name (type: string)
-outputColumnNames: _col0
+  Filter Operator
+predicate: p_name is not null (type: boolean)
 Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
+Select Operat

[1/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 0ebf04c87 -> f567a8231


http://git-wip-us.apache.org/repos/asf/hive/blob/f567a823/ql/src/test/results/clientpositive/spark/subquery_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_views.q.out b/ql/src/test/results/clientpositive/spark/subquery_views.q.out
index af7ce71..572fe2e 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_views.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_views.q.out
@@ -160,19 +160,19 @@ STAGE PLANS:
   alias: a
   Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
   Filter Operator
-predicate: ((value > 'val_11') and key is not null) (type: 
boolean)
-Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+predicate: ((key < '11') and (value > 'val_11')) (type: 
boolean)
+Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
   aggregations: count(), count(key)
   keys: key (type: string), value (type: string)
   mode: hash
   outputColumnNames: _col0, _col1, _col2, _col3
-  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
   Reduce Output Operator
 key expressions: _col0 (type: string), _col1 (type: 
string)
 sort order: ++
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
 value expressions: _col2 (type: bigint), _col3 (type: 
bigint)
 Execution mode: vectorized
 Map 15 
@@ -181,18 +181,18 @@ STAGE PLANS:
   alias: a
   Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
   Filter Operator
-predicate: ((value > 'val_11') and key is not null) (type: 
boolean)
-Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+predicate: ((key < '11') and (value > 'val_11')) (type: 
boolean)
+Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
   keys: key (type: string), value (type: string)
   mode: hash
   outputColumnNames: _col0, _col1
-  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
   Reduce Output Operator
 key expressions: _col0 (type: string), _col1 (type: 
string)
 sort order: ++
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 55 Data size: 584 Basic stats: 
COMPLETE Column stats: NONE
 Execution mode: vectorized
 Map 18 
 Map Operator Tree:
@@ -201,20 +201,19 @@ STAGE PLANS:
   properties:
 insideView TRUE
   Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: key (type: string)
-outputColumnNames: key
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+  Filter Operator
+predicate: (key < '11') (type: boolean)
+Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
   keys: key (type: string)
   mode: hash
   outputColumnNames: _col0
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
   Reduce Output Operator
 key expressions: _col0 (type: string)
   

[2/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/f567a823/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out b/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out
index 5b3ad71..b347231 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_scalar.q.out
@@ -1326,16 +1326,19 @@ STAGE PLANS:
 TableScan
   alias: e
   Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: p_name (type: string), p_size (type: int)
-outputColumnNames: _col0, _col1
+  Filter Operator
+predicate: p_name is not null (type: boolean)
 Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
+Select Operator
+  expressions: p_name (type: string), p_size (type: int)
+  outputColumnNames: _col0, _col1
   Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
-  value expressions: _col1 (type: int)
+  Reduce Output Operator
+key expressions: _col0 (type: string)
+sort order: +
+Map-reduce partition columns: _col0 (type: string)
+Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
+value expressions: _col1 (type: int)
 Execution mode: vectorized
 Map 4 
 Map Operator Tree:
@@ -1365,11 +1368,11 @@ STAGE PLANS:
  Inner Join 0 to 1
 keys:
   0 _col0 (type: string)
-  1 _col2 (type: string)
-outputColumnNames: _col1, _col2, _col3
+  1 _col1 (type: string)
+outputColumnNames: _col1, _col2
 Statistics: Num rows: 28 Data size: 3461 Basic stats: COMPLETE 
Column stats: NONE
 Filter Operator
-  predicate: ((_col1 + 100) < CASE WHEN (_col3 is null) THEN 
(null) ELSE (_col2) END) (type: boolean)
+  predicate: ((_col1 + 100) < _col2) (type: boolean)
   Statistics: Num rows: 9 Data size: 1112 Basic stats: 
COMPLETE Column stats: NONE
   Select Operator
 Statistics: Num rows: 9 Data size: 1112 Basic stats: 
COMPLETE Column stats: NONE
@@ -1407,15 +1410,15 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE 
Column stats: NONE
 Select Operator
-  expressions: _col1 (type: int), true (type: boolean), _col0 
(type: string)
-  outputColumnNames: _col0, _col1, _col2
+  expressions: _col1 (type: int), _col0 (type: string)
+  outputColumnNames: _col0, _col1
   Statistics: Num rows: 13 Data size: 1573 Basic stats: 
COMPLETE Column stats: NONE
   Reduce Output Operator
-key expressions: _col2 (type: string)
+key expressions: _col1 (type: string)
 sort order: +
-Map-reduce partition columns: _col2 (type: string)
+Map-reduce partition columns: _col1 (type: string)
 Statistics: Num rows: 13 Data size: 1573 Basic stats: 
COMPLETE Column stats: NONE
-value expressions: _col0 (type: int), _col1 (type: boolean)
+value expressions: _col0 (type: int)
 
   Stage: Stage-0
 Fetch Operator
@@ -1454,15 +1457,18 @@ STAGE PLANS:
 TableScan
   alias: e
   Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: p_name (type: string)
-outputColumnNames: _col0
+  Filter Operator
+predicate: p_name is not null (type: boolean)
 Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
+Select Operat

[8/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus 
Camacho Rodriguez via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f567a823
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f567a823
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f567a823

Branch: refs/heads/master
Commit: f567a8231f4329e77d8eceb68b40d0cee8022a47
Parents: 0ebf04c
Author: Jesus Camacho Rodriguez 
Authored: Sun Apr 29 14:14:00 2018 -0700
Committer: Ashutosh Chauhan 
Committed: Mon Jun 4 21:00:15 2018 -0700

----------------------------------------------------------------------
 .../ql/optimizer/calcite/HiveRelOptUtil.java| 113 +++
 .../calcite/cost/HiveVolcanoPlanner.java|   5 +-
 .../calcite/rules/HiveRelDecorrelator.java  | 442 +--
 .../clientpositive/llap/explainuser_1.q.out | 112 ++-
 .../llap/groupby_groupingset_bug.q.out  |  16 +-
 .../results/clientpositive/llap/lineage3.q.out  |   4 +-
 .../clientpositive/llap/subquery_in.q.out   | 100 ++-
 .../llap/subquery_in_having.q.out   |  60 +-
 .../clientpositive/llap/subquery_multi.q.out| 212 --
 .../clientpositive/llap/subquery_notin.q.out| 738 ++-
 .../clientpositive/llap/subquery_scalar.q.out   | 545 +++---
 .../clientpositive/llap/subquery_select.q.out   | 100 ++-
 .../clientpositive/llap/subquery_views.q.out|  93 +--
 .../clientpositive/perf/spark/query1.q.out  |  16 +-
 .../clientpositive/perf/spark/query30.q.out |  18 +-
 .../clientpositive/perf/spark/query32.q.out |  18 +-
 .../clientpositive/perf/spark/query6.q.out  |  22 +-
 .../clientpositive/perf/spark/query81.q.out |  18 +-
 .../clientpositive/perf/spark/query92.q.out |  18 +-
 .../clientpositive/perf/tez/query1.q.out|   8 +-
 .../clientpositive/perf/tez/query30.q.out   |  10 +-
 .../clientpositive/perf/tez/query32.q.out   |   8 +-
 .../clientpositive/perf/tez/query6.q.out|  12 +-
 .../clientpositive/perf/tez/query81.q.out   |  50 +-
 .../clientpositive/perf/tez/query92.q.out   |   8 +-
 .../spark/spark_explainuser_1.q.out | 120 +--
 .../clientpositive/spark/subquery_in.q.out  |  84 +--
 .../clientpositive/spark/subquery_multi.q.out   |  87 +--
 .../clientpositive/spark/subquery_notin.q.out   | 512 ++---
 .../clientpositive/spark/subquery_scalar.q.out  | 537 +++---
 .../clientpositive/spark/subquery_select.q.out  |  90 +--
 .../clientpositive/spark/subquery_views.q.out   |  85 ++-
 .../clientpositive/subquery_notexists.q.out |  42 +-
 .../clientpositive/subquery_notin_having.q.out  |  43 +-
 .../subquery_unqualcolumnrefs.q.out |  25 +-
 35 files changed, 2233 insertions(+), 2138 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f567a823/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
index 50fbb78..268284a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
@@ -21,13 +21,19 @@ import java.util.AbstractList;
 import java.util.ArrayList;
 import java.util.List;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.AggregateCall;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.RelFactories;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexFieldAccess;
+import org.apache.calcite.rex.RexInputRef;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.sql.SqlKind;
@@ -347,4 +353,111 @@ public class HiveRelOptUtil extends RelOptUtil {
 }, true, relBuilder);
   }
 
+  public static RexNode splitCorrelatedFilterCondition(
+  Filter filter,
+  List joinKeys,
+  List correlatedJoinKeys,
+  boolean extractCorrelatedFieldAccess) {
+final List<RexNode> nonEquiList = new ArrayList<>();
+
+splitCorrelatedFilterCondition(
+filter,
+filter.getCondition(),
+joinKeys,
+correlatedJoinKeys,
+nonEquiList,
+extractCorrelatedFieldAccess);
+
+// Convert the remainders into a list that is AND'ed together.
+return RexUtil.composeConjunction(
+   

[4/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/f567a823/ql/src/test/results/clientpositive/llap/subquery_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_views.q.out b/ql/src/test/results/clientpositive/llap/subquery_views.q.out
index 2c85309..4609668 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_views.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_views.q.out
@@ -132,33 +132,33 @@ STAGE PLANS:
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
 Statistics: Num rows: 166 Data size: 29548 Basic 
stats: COMPLETE Column stats: COMPLETE
   Filter Operator
-predicate: ((value > 'val_11') and key is not null) (type: 
boolean)
-Statistics: Num rows: 166 Data size: 29548 Basic stats: 
COMPLETE Column stats: COMPLETE
+predicate: ((key < '11') and (value > 'val_11')) (type: 
boolean)
+Statistics: Num rows: 55 Data size: 9790 Basic stats: 
COMPLETE Column stats: COMPLETE
 Group By Operator
   aggregations: count(), count(key)
   keys: key (type: string), value (type: string)
   mode: hash
   outputColumnNames: _col0, _col1, _col2, _col3
-  Statistics: Num rows: 83 Data size: 16102 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Statistics: Num rows: 27 Data size: 5238 Basic stats: 
COMPLETE Column stats: COMPLETE
   Reduce Output Operator
 key expressions: _col0 (type: string), _col1 (type: 
string)
 sort order: ++
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-Statistics: Num rows: 83 Data size: 16102 Basic stats: 
COMPLETE Column stats: COMPLETE
+Statistics: Num rows: 27 Data size: 5238 Basic stats: 
COMPLETE Column stats: COMPLETE
 value expressions: _col2 (type: bigint), _col3 (type: 
bigint)
   Filter Operator
-predicate: ((value > 'val_11') and key is not null) (type: 
boolean)
-Statistics: Num rows: 166 Data size: 29548 Basic stats: 
COMPLETE Column stats: COMPLETE
+predicate: ((key < '11') and (value > 'val_11')) (type: 
boolean)
+Statistics: Num rows: 55 Data size: 9790 Basic stats: 
COMPLETE Column stats: COMPLETE
 Group By Operator
   keys: key (type: string), value (type: string)
   mode: hash
   outputColumnNames: _col0, _col1
-  Statistics: Num rows: 83 Data size: 14774 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Statistics: Num rows: 27 Data size: 4806 Basic stats: 
COMPLETE Column stats: COMPLETE
   Reduce Output Operator
 key expressions: _col0 (type: string), _col1 (type: 
string)
 sort order: ++
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-Statistics: Num rows: 83 Data size: 14774 Basic stats: 
COMPLETE Column stats: COMPLETE
+Statistics: Num rows: 27 Data size: 4806 Basic stats: 
COMPLETE Column stats: COMPLETE
   Filter Operator
 predicate: (key < '11') (type: boolean)
 Statistics: Num rows: 166 Data size: 29548 Basic stats: 
COMPLETE Column stats: COMPLETE
@@ -172,19 +172,19 @@ STAGE PLANS:
 Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
 Statistics: Num rows: 166 Data size: 29548 Basic 
stats: COMPLETE Column stats: COMPLETE
   Filter Operator
-predicate: ((value > 'val_11') and key is not null) (type: 
boolean)
-Statistics: Num rows: 166 Data size: 29548 Basic stats: 
COMPLETE Column stats: COMPLETE
+predicate: ((key < '11') and (value > 'val_11')) (type: 
boolean)
+Statistics: Num rows: 55 Data size: 9790 Basic stats: 
COMPLETE Column stats: COMPLETE
 Group By Operator
   aggregations: count(), count(key)
   keys: key (type: string), value (type: string)
   mode: hash
   outputColumnNames: _col0, _col1, _col2, _col3
-  Statistics: Num rows: 83 Data size: 16102 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Statistics: Num rows: 27 Data size: 5238 Basic stats: 
COMPLETE Column stats: COMPLETE
   Reduce Output
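The repeated plan change above is the visible effect of decorrelation: the correlated predicate (key < '11') from the outer view now also appears inside both subquery branches, which is why the estimates drop from 166 to 55 rows before each Group By. A toy sketch of why pushing the predicate is safe, assuming string comparison as in the plan; the key values are invented:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class CorrelatedPushdownDemo {
  public static void main(String[] args) {
    List<String> keys = Arrays.asList("10", "104", "113", "12");
    // The outer side only probes the subquery with keys < '11', so filtering
    // the subquery's input with the same predicate cannot change any answer.
    List<String> probes = keys.stream()
        .filter(k -> k.compareTo("11") < 0)
        .collect(Collectors.toList());
    System.out.println(probes); // [10, 104]
  }
}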

[3/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/f567a823/ql/src/test/results/clientpositive/spark/subquery_multi.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/subquery_multi.q.out 
b/ql/src/test/results/clientpositive/spark/subquery_multi.q.out
index 4b48a59..4224c67 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_multi.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_multi.q.out
@@ -1691,7 +1691,7 @@ STAGE PLANS:
   alias: part
   Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
   Filter Operator
-predicate: (p_brand is not null and p_type is not null) 
(type: boolean)
+predicate: (p_brand is not null and p_container is not 
null and p_type is not null) (type: boolean)
 Statistics: Num rows: 26 Data size: 3147 Basic stats: 
COMPLETE Column stats: NONE
 Select Operator
   expressions: p_brand (type: string), p_type (type: 
string), p_container (type: string)
@@ -1820,19 +1820,16 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: _col0 is not null (type: boolean)
+Select Operator
+  expressions: _col0 (type: string), _col1 (type: string), 
true (type: boolean)
+  outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 14 Data size: 1730 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: _col0 (type: string), _col1 (type: string), 
true (type: boolean)
-outputColumnNames: _col0, _col1, _col2
+  Reduce Output Operator
+key expressions: _col0 (type: string), _col1 (type: string)
+sort order: ++
+Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
 Statistics: Num rows: 14 Data size: 1730 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string), _col1 (type: 
string)
-  sort order: ++
-  Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-  Statistics: Num rows: 14 Data size: 1730 Basic stats: 
COMPLETE Column stats: NONE
-  value expressions: _col2 (type: boolean)
+value expressions: _col2 (type: boolean)
 Reducer 2 
 Reduce Operator Tree:
   Join Operator
@@ -1876,15 +1873,15 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col10, _col11, _col14
 Statistics: Num rows: 33 Data size: 4187 Basic stats: COMPLETE 
Column stats: NONE
 Filter Operator
-  predicate: (not CASE WHEN ((_col10 = 0L)) THEN (false) WHEN 
(_col10 is null) THEN (false) WHEN (_col14 is not null) THEN (true) WHEN (_col3 
is null) THEN (null) WHEN ((_col11 < _col10)) THEN (true) ELSE (false) END) 
(type: boolean)
-  Statistics: Num rows: 17 Data size: 2156 Basic stats: 
COMPLETE Column stats: NONE
+  predicate: CASE WHEN ((_col10 = 0L)) THEN (true) WHEN 
(_col10 is null) THEN (true) WHEN (_col14 is not null) THEN (false) WHEN (_col3 
is null) THEN (null) WHEN ((_col11 < _col10)) THEN (false) ELSE (true) END 
(type: boolean)
+  Statistics: Num rows: 16 Data size: 2030 Basic stats: 
COMPLETE Column stats: NONE
   Select Operator
 expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: 
int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8
-Statistics: Num rows: 17 Data size: 2156 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 16 Data size: 2030 Basic stats: 
COMPLETE Column stats: NONE
 File Output Operator
   compressed: false
-  Statistics: Num rows: 17 Data size: 2156 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 16 Data size: 2030 Basic stats: 
COMPLETE Column stats: NONE
   table:
   input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@
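The NOT IN predicate rewrite above (not CASE ... END becoming a CASE with every THEN/ELSE constant negated) is sound under SQL three-valued logic: NOT maps true to false, false to true, and unknown to unknown, so negating each branch result is the same as negating the whole CASE. A minimal sketch, using java.lang.Boolean with null standing in for SQL's unknown:

public class ThreeValuedNotDemo {
  static Boolean not3(Boolean b) {
    return b == null ? null : !b;
  }

  public static void main(String[] args) {
    for (Boolean branch : new Boolean[] {Boolean.TRUE, Boolean.FALSE, null}) {
      // true -> false, false -> true, null (unknown) -> null (unknown)
      System.out.println(branch + " -> " + not3(branch));
    }
  }
}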

[5/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/f567a823/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out 
b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
index d49804b..5e24ae6 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
@@ -1326,16 +1326,19 @@ STAGE PLANS:
 TableScan
   alias: e
   Statistics: Num rows: 26 Data size: 3250 Basic stats: 
COMPLETE Column stats: COMPLETE
-  Select Operator
-expressions: p_name (type: string), p_size (type: int)
-outputColumnNames: _col0, _col1
+  Filter Operator
+predicate: p_name is not null (type: boolean)
 Statistics: Num rows: 26 Data size: 3250 Basic stats: 
COMPLETE Column stats: COMPLETE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
+Select Operator
+  expressions: p_name (type: string), p_size (type: int)
+  outputColumnNames: _col0, _col1
   Statistics: Num rows: 26 Data size: 3250 Basic stats: 
COMPLETE Column stats: COMPLETE
-  value expressions: _col1 (type: int)
+  Reduce Output Operator
+key expressions: _col0 (type: string)
+sort order: +
+Map-reduce partition columns: _col0 (type: string)
+Statistics: Num rows: 26 Data size: 3250 Basic stats: 
COMPLETE Column stats: COMPLETE
+value expressions: _col1 (type: int)
 Execution mode: vectorized, llap
 LLAP IO: no inputs
 Map 4 
@@ -1368,9 +1371,9 @@ STAGE PLANS:
  Inner Join 0 to 1
 keys:
   0 _col0 (type: string)
-  1 _col2 (type: string)
-outputColumnNames: _col1, _col2, _col3
-residual filter predicates: {((_col1 + 100) < CASE WHEN (_col3 
is null) THEN (null) ELSE (_col2) END)}
+  1 _col1 (type: string)
+outputColumnNames: _col1, _col2
+residual filter predicates: {((_col1 + 100) < _col2)}
 Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE 
Column stats: COMPLETE
 Select Operator
   Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE 
Column stats: COMPLETE
@@ -1408,15 +1411,15 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 13 Data size: 1625 Basic stats: COMPLETE 
Column stats: COMPLETE
 Select Operator
-  expressions: _col1 (type: int), true (type: boolean), _col0 
(type: string)
-  outputColumnNames: _col0, _col1, _col2
-  Statistics: Num rows: 13 Data size: 1677 Basic stats: 
COMPLETE Column stats: COMPLETE
+  expressions: _col1 (type: int), _col0 (type: string)
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 13 Data size: 1625 Basic stats: 
COMPLETE Column stats: COMPLETE
   Reduce Output Operator
-key expressions: _col2 (type: string)
+key expressions: _col1 (type: string)
 sort order: +
-Map-reduce partition columns: _col2 (type: string)
-Statistics: Num rows: 13 Data size: 1677 Basic stats: 
COMPLETE Column stats: COMPLETE
-value expressions: _col0 (type: int), _col1 (type: boolean)
+Map-reduce partition columns: _col1 (type: string)
+Statistics: Num rows: 13 Data size: 1625 Basic stats: 
COMPLETE Column stats: COMPLETE
+value expressions: _col0 (type: int)
 
   Stage: Stage-0
 Fetch Operator
@@ -1456,15 +1459,18 @@ STAGE PLANS:
 TableScan
   alias: e
   Statistics: Num rows: 26 Data size: 3146 Basic stats: 
COMPLETE Column stats: COMPLETE
-  Select Operator
-expressions: p_name (type: string)
-outputColumnNames: _col0
+  Filter Operator
+predicate: p_name is not null (type: boolean)
 Statistics: Num rows: 26 Data size: 3146 Basic stats: 
COMPLETE Column stats: COMPLETE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort o
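Another recurring change in these plans is a null filter (p_name is not null) pushed ahead of the join. That is safe for an inner equi-join because SQL equality never matches a NULL key, so null rows could not have joined anyway. A small illustration in plain Java, with invented data:

import java.util.Arrays;
import java.util.List;

public class NullRejectingJoinDemo {
  public static void main(String[] args) {
    List<String> left = Arrays.asList("a", null, "b");
    List<String> right = Arrays.asList("a", null);
    for (String l : left) {
      if (l == null) {
        continue; // the pushed-down "is not null" filter: null keys never match
      }
      for (String r : right) {
        if (l.equals(r)) {
          System.out.println(l + " joins " + r); // prints only: a joins a
        }
      }
    }
  }
}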

[7/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/f567a823/ql/src/test/results/clientpositive/llap/subquery_in.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/subquery_in.q.out 
b/ql/src/test/results/clientpositive/llap/subquery_in.q.out
index 456e324..7dd64af 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_in.q.out
@@ -2106,7 +2106,7 @@ STAGE PLANS:
   alias: p
   Statistics: Num rows: 26 Data size: 3354 Basic stats: 
COMPLETE Column stats: COMPLETE
   Filter Operator
-predicate: p_name is not null (type: boolean)
+predicate: (p_name is not null and p_partkey is not null 
and p_size is not null) (type: boolean)
 Statistics: Num rows: 26 Data size: 3354 Basic stats: 
COMPLETE Column stats: COMPLETE
 Select Operator
   expressions: p_partkey (type: int), p_name (type: 
string), p_size (type: int)
@@ -2495,7 +2495,7 @@ STAGE PLANS:
   alias: part
   Statistics: Num rows: 26 Data size: 2808 Basic stats: 
COMPLETE Column stats: COMPLETE
   Filter Operator
-predicate: p_type is not null (type: boolean)
+predicate: (p_size is not null and p_type is not null) 
(type: boolean)
 Statistics: Num rows: 26 Data size: 2808 Basic stats: 
COMPLETE Column stats: COMPLETE
 Select Operator
   expressions: p_type (type: string), p_size (type: int)
@@ -2514,12 +2514,12 @@ STAGE PLANS:
 TableScan
   alias: part
   Statistics: Num rows: 26 Data size: 104 Basic stats: 
COMPLETE Column stats: COMPLETE
-  Select Operator
-expressions: (p_size + 1) (type: int)
-outputColumnNames: _col0
+  Filter Operator
+predicate: p_size is not null (type: boolean)
 Statistics: Num rows: 26 Data size: 104 Basic stats: 
COMPLETE Column stats: COMPLETE
-Filter Operator
-  predicate: _col0 is not null (type: boolean)
+Select Operator
+  expressions: (p_size + 1) (type: int)
+  outputColumnNames: _col0
   Statistics: Num rows: 26 Data size: 104 Basic stats: 
COMPLETE Column stats: COMPLETE
   Group By Operator
 keys: _col0 (type: int)
@@ -4331,17 +4331,17 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col10
 Statistics: Num rows: 26 Data size: 16214 Basic stats: 
COMPLETE Column stats: COMPLETE
 Filter Operator
-  predicate: (sq_count_check(_col10, true) > 0) (type: boolean)
+  predicate: (sq_count_check(CASE WHEN (_col10 is null) THEN 
(0) ELSE (_col10) END, true) > 0) (type: boolean)
   Statistics: Num rows: 8 Data size: 4992 Basic stats: 
COMPLETE Column stats: COMPLETE
   Select Operator
 expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: 
int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8
-Statistics: Num rows: 8 Data size: 4992 Basic stats: 
COMPLETE Column stats: COMPLETE
+Statistics: Num rows: 8 Data size: 4952 Basic stats: 
COMPLETE Column stats: COMPLETE
 Reduce Output Operator
   key expressions: _col4 (type: string), UDFToLong(_col5) 
(type: bigint)
   sort order: ++
   Map-reduce partition columns: _col4 (type: string), 
UDFToLong(_col5) (type: bigint)
-  Statistics: Num rows: 8 Data size: 4992 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Statistics: Num rows: 8 Data size: 4952 Basic stats: 
COMPLETE Column stats: COMPLETE
   value expressions: _col0 (type: int), _col1 (type: 
string), _col2 (type: string), _col3 (type: string), _col5 (type: int), _col6 
(type: string), _col7 (type: double), _col8 (type: string)
 Reducer 3 
 Execution mode: llap
@@ -4837,17 +4837,17 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col10
 Statistics: Num rows: 26 Data size: 16214 Basic stats: 
COMPLETE Column stats: COMPLETE
 Filter Operator
-  predicate: (sq_count_check(_col10, true) > 0) (ty
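The sq_count_check change wraps the count in CASE WHEN (_colN is null) THEN (0) ELSE (_colN) END before the check. After an outer join, the aggregate branch may contribute no row at all, leaving a NULL count; coalescing it to 0 lets the guard evaluate to a definite true or false instead of unknown. A toy sketch of the coalesce-then-check shape; the stand-in below is not Hive's actual UDF, and its more-than-one-row rule is an assumption:

public class SqCountCheckSketch {
  // Illustrative stand-in for the sq_count_check guard.
  static long sqCountCheck(long cnt) {
    if (cnt > 1) {
      throw new IllegalStateException("scalar subquery produced more than one row");
    }
    return cnt;
  }

  public static void main(String[] args) {
    Long countFromOuterJoin = null; // no matching aggregate row -> NULL count
    long cnt = countFromOuterJoin == null ? 0L : countFromOuterJoin; // the CASE rewrite
    System.out.println(sqCountCheck(cnt) > 0); // false, a definite answer
  }
}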

[6/8] hive git commit: HIVE-19358 : CBO decorrelation logic should generate Hive operators (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
http://git-wip-us.apache.org/repos/asf/hive/blob/f567a823/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out 
b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
index d72e8c3..5eabc7d 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
@@ -388,7 +388,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col8
 Statistics: Num rows: 26 Data size: 5994 Basic stats: COMPLETE 
Column stats: COMPLETE
 Filter Operator
-  predicate: (not CASE WHEN ((_col4 = 0L)) THEN (false) WHEN 
(_col4 is null) THEN (false) WHEN (_col8 is not null) THEN (true) WHEN (_col0 
is null) THEN (null) WHEN ((_col5 < _col4)) THEN (true) ELSE (false) END) 
(type: boolean)
+  predicate: CASE WHEN ((_col4 = 0L)) THEN (true) WHEN (_col4 
is null) THEN (true) WHEN (_col8 is not null) THEN (false) WHEN (_col0 is null) 
THEN (null) WHEN ((_col5 < _col4)) THEN (false) ELSE (true) END (type: boolean)
   Statistics: Num rows: 13 Data size: 3007 Basic stats: 
COMPLETE Column stats: COMPLETE
   Select Operator
 expressions: _col1 (type: string), _col0 (type: string), 
_col2 (type: int)
@@ -492,7 +492,7 @@ STAGE PLANS:
   isPivotResult: true
   Statistics: Num rows: 26 Data size: 12766 Basic stats: 
COMPLETE Column stats: COMPLETE
   Filter Operator
-predicate: (rank_window_0 <= 2) (type: boolean)
+predicate: ((rank_window_0 <= 2) and _col1 is not null) 
(type: boolean)
 Statistics: Num rows: 8 Data size: 3928 Basic stats: 
COMPLETE Column stats: COMPLETE
 Select Operator
   expressions: _col1 (type: string), _col2 (type: string)
@@ -516,19 +516,16 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 4 Data size: 876 Basic stats: COMPLETE 
Column stats: COMPLETE
-Filter Operator
-  predicate: _col0 is not null (type: boolean)
-  Statistics: Num rows: 4 Data size: 876 Basic stats: COMPLETE 
Column stats: COMPLETE
-  Select Operator
-expressions: _col0 (type: string), _col1 (type: string), 
true (type: boolean)
-outputColumnNames: _col0, _col1, _col2
+Select Operator
+  expressions: _col0 (type: string), _col1 (type: string), 
true (type: boolean)
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 4 Data size: 892 Basic stats: COMPLETE 
Column stats: COMPLETE
+  Reduce Output Operator
+key expressions: _col0 (type: string), _col1 (type: string)
+sort order: ++
+Map-reduce partition columns: _col0 (type: string), _col1 
(type: string)
 Statistics: Num rows: 4 Data size: 892 Basic stats: 
COMPLETE Column stats: COMPLETE
-Reduce Output Operator
-  key expressions: _col0 (type: string), _col1 (type: 
string)
-  sort order: ++
-  Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
-  Statistics: Num rows: 4 Data size: 892 Basic stats: 
COMPLETE Column stats: COMPLETE
-  value expressions: _col2 (type: boolean)
+value expressions: _col2 (type: boolean)
 
   Stage: Stage-0
 Fetch Operator
@@ -932,17 +929,17 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col2, _col4
 Statistics: Num rows: 26 Data size: 5886 Basic stats: COMPLETE 
Column stats: COMPLETE
 Filter Operator
-  predicate: (sq_count_check(_col4, true) > 0) (type: boolean)
+  predicate: (sq_count_check(CASE WHEN (_col4 is null) THEN 
(0) ELSE (_col4) END, true) > 0) (type: boolean)
   Statistics: Num rows: 8 Data size: 1816 Basic stats: 
COMPLETE Column stats: COMPLETE
   Select Operator
 expressions: _col0 (type: string), _col1 (type: string), 
_col2 (type: int)
 outputColumnNames: _col0, _col1, _col2
-Statistics: Num rows: 8 Data size: 1816 Basic stats: 
COMPLETE Column stats: COMPLETE
+Statistics: Num rows: 8 Data size: 1784 Basic stats: 
COMPLETE Column stats: COMPLETE
 Reduce Output Operator
   key expressions: _col1 (type: string)
   sort order: +
 

[1/2] hive git commit: HIVE-19762 : Druid Queries containing Joins gives wrong results (Nishant Bangarwa via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/branch-3 a4f87134b -> e5f7714ac


http://git-wip-us.apache.org/repos/asf/hive/blob/e5f7714a/ql/src/test/results/clientpositive/druid_basic2.q.out
--
diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out 
b/ql/src/test/results/clientpositive/druid_basic2.q.out
deleted file mode 100644
index 8c22c94..000
--- a/ql/src/test/results/clientpositive/druid_basic2.q.out
+++ /dev/null
@@ -1,944 +0,0 @@
-PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2
-STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
-TBLPROPERTIES ("druid.datasource" = "wikipedia")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@druid_table_1_n2
-POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2
-STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
-TBLPROPERTIES ("druid.datasource" = "wikipedia")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@druid_table_1_n2
-PREHOOK: query: DESCRIBE FORMATTED druid_table_1_n2
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@druid_table_1_n2
-POSTHOOK: query: DESCRIBE FORMATTED druid_table_1_n2
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@druid_table_1_n2
-# col_name data_type   comment 
-__time timestamp with local time zone  from deserializer   
-robot  string  from deserializer   
-namespace  string  from deserializer   
-anonymous  string  from deserializer   
-unpatrolledstring  from deserializer   
-page   string  from deserializer   
-language   string  from deserializer   
-newpagestring  from deserializer   
-user   string  from deserializer   
-count  float   from deserializer   
-added  float   from deserializer   
-delta  float   from deserializer   
-variation  float   from deserializer   
-deletedfloat   from deserializer   
-
-# Detailed Table Information
-Database:  default  
- A masked pattern was here 
-Retention: 0
- A masked pattern was here 
-Table Type:EXTERNAL_TABLE   
-Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"__time\":\"true\",\"added\":\"true\",\"anonymous\":\"true\",\"count\":\"true\",\"deleted\":\"true\",\"delta\":\"true\",\"language\":\"true\",\"namespace\":\"true\",\"newpage\":\"true\",\"page\":\"true\",\"robot\":\"true\",\"unpatrolled\":\"true\",\"user\":\"true\",\"variation\":\"true\"}}
-   EXTERNALTRUE
-   bucketing_version   2   
-   druid.datasourcewikipedia   
-   numFiles0   
-   numRows 0   
-   rawDataSize 0   
-   storage_handler 
org.apache.hadoop.hive.druid.QTestDruidStorageHandler
-   totalSize   0   
- A masked pattern was here 
-
-# Storage Information   
-SerDe Library: org.apache.hadoop.hive.druid.QTestDruidSerDe 
-InputFormat:   null 
-OutputFormat:  null 
-Compressed:No   
-Num Buckets:   -1   
-Bucket Columns:[]   
-Sort Columns:  []   
-Storage Desc Params:
-   serialization.format1   
-PREHOOK: query: EXPLAIN EXTENDED
-SELECT robot FROM druid_table_1_n2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
-SELECT robot FROM druid_table_1_n2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-Fetch Operator
-  limit: -1
-  Processor Tree:
-TableScan
-  alias: druid_table_1_n2
-  properties:
-druid.fieldNames robot
-druid.fieldTypes string
-druid.query.json 
{"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"columns":["robot"],"resultFormat":"compactedList"}
-druid.query.type scan
-  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column 
stats: NONE
-  GatherStats: false
-  Select Operator
-expressions: robot (type: string)
-outputColumn

[2/2] hive git commit: HIVE-19762 : Druid Queries containing Joins gives wrong results (Nishant Bangarwa via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
HIVE-19762 : Druid Queries containing Joins gives wrong results (Nishant 
Bangarwa via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0ebf04c8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0ebf04c8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0ebf04c8

Branch: refs/heads/master
Commit: 0ebf04c87a6d8c36e699148c7f38dd502fe48b66
Parents: 91cdd4f
Author: Nishant Bangarwa 
Authored: Mon Jun 4 20:25:43 2018 -0700
Committer: Ashutosh Chauhan 
Committed: Mon Jun 4 20:56:41 2018 -0700

--
 .../test/resources/testconfiguration.properties |2 +
 .../queries/clientpositive/druidmini_joins.q|   60 +
 .../clientpositive/druid/druid_basic2.q.out | 1051 ++
 .../clientpositive/druid/druidmini_joins.q.out  |  224 
 .../results/clientpositive/druid_basic2.q.out   |  944 
 .../hive/metastore/utils/MetaStoreUtils.java|5 +-
 6 files changed, 1341 insertions(+), 945 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0ebf04c8/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 14a93a1..f3cb9de 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1668,6 +1668,8 @@ spark.perf.disabled.query.files=query14.q,\
   query64.q
 
 druid.query.files=druidmini_test1.q,\
+  druid_basic2.q,\
+  druidmini_joins.q,\
   druidmini_test_insert.q,\
   druidmini_mv.q,\
   druid_timestamptz.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/0ebf04c8/ql/src/test/queries/clientpositive/druidmini_joins.q
--
diff --git a/ql/src/test/queries/clientpositive/druidmini_joins.q 
b/ql/src/test/queries/clientpositive/druidmini_joins.q
new file mode 100644
index 000..720127e
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/druidmini_joins.q
@@ -0,0 +1,60 @@
+SET hive.vectorized.execution.enabled=false;
+SET hive.explain.user=false;
+
+--SET hive.execution.mode=llap;
+
+DROP TABLE druid_table_with_nulls;
+
+CREATE TABLE druid_table_with_nulls
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
+AS
+SELECT cast(current_timestamp() AS timestamp with local time zone) AS `__time`,
+   cast(username AS string) AS username,
+   cast(double1 AS double) AS double1,
+   cast(int1 AS int) AS int1
+FROM TABLE (
+  VALUES
+('alfred', 10.30, 2),
+('bob', 3.14, null),
+('bonnie', null, 3),
+('calvin', null, null),
+('charlie', 9.8, 1),
+('charlie', 15.8, 1)) as q (username, double1, int1);
+
+EXPLAIN SELECT
+username AS `username`,
+SUM(double1) AS `sum_double1`
+FROM
+druid_table_with_nulls `tbl1`
+  JOIN (
+SELECT
+username AS `username`,
+SUM(double1) AS `sum_double2`
+FROM druid_table_with_nulls
+GROUP BY `username`
+ORDER BY `sum_double2`
+DESC  LIMIT 10
+  )
+  `tbl2`
+ON (`tbl1`.`username` = `tbl2`.`username`)
+GROUP BY `tbl1`.`username`;
+
+
+SELECT
+username AS `username`,
+SUM(double1) AS `sum_double1`
+FROM
+druid_table_with_nulls `tbl1`
+  JOIN (
+SELECT
+username AS `username`,
+SUM(double1) AS `sum_double2`
+FROM druid_table_with_nulls
+GROUP BY `username`
+ORDER BY `sum_double2`
+DESC  LIMIT 10
+  )
+  `tbl2`
+ON (`tbl1`.`username` = `tbl2`.`username`)
+GROUP BY `tbl1`.`username`;
\ No newline at end of file
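As a quick sanity check on the new test's data, the inner aggregation (SUM(double1) GROUP BY username) can be reproduced in plain Java. The sketch skips NULL inputs the way SQL SUM does; note that bonnie and calvin, whose only values are NULL, simply vanish here, whereas Hive would report them with a NULL sum:

import java.util.LinkedHashMap;
import java.util.Map;

public class DruidJoinTestSanity {
  public static void main(String[] args) {
    Object[][] rows = {
        {"alfred", 10.30}, {"bob", 3.14}, {"bonnie", null},
        {"calvin", null}, {"charlie", 9.8}, {"charlie", 15.8}};
    Map<String, Double> sums = new LinkedHashMap<>();
    for (Object[] r : rows) {
      if (r[1] == null) {
        continue; // SUM ignores NULL inputs
      }
      sums.merge((String) r[0], (Double) r[1], Double::sum);
    }
    System.out.println(sums); // expected: {alfred=10.3, bob=3.14, charlie=25.6}
  }
}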

http://git-wip-us.apache.org/repos/asf/hive/blob/0ebf04c8/ql/src/test/results/clientpositive/druid/druid_basic2.q.out
--
diff --git a/ql/src/test/results/clientpositive/druid/druid_basic2.q.out 
b/ql/src/test/results/clientpositive/druid/druid_basic2.q.out
new file mode 100644
index 000..88916b9
--- /dev/null
+++ b/ql/src/test/results/clientpositive/druid/druid_basic2.q.out
@@ -0,0 +1,1051 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1_n2
+POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@druid_table_1_n2
+PREHOOK: query: DESCRIBE FORMATTED druid_table_1_n2
+PREHOOK: type: DESCTABLE
+

[2/2] hive git commit: HIVE-19762 : Druid Queries containing Joins gives wrong results (Nishant Bangarwa via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
HIVE-19762 : Druid Queries containing Joins gives wrong results (Nishant 
Bangarwa via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e5f7714a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e5f7714a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e5f7714a

Branch: refs/heads/branch-3
Commit: e5f7714ac1ecc889bdd34e17707b4a28d8413a24
Parents: a4f8713
Author: Nishant Bangarwa 
Authored: Mon Jun 4 20:25:43 2018 -0700
Committer: Ashutosh Chauhan 
Committed: Mon Jun 4 20:57:49 2018 -0700

--
 .../test/resources/testconfiguration.properties |2 +
 .../queries/clientpositive/druidmini_joins.q|   60 +
 .../clientpositive/druid/druid_basic2.q.out | 1051 ++
 .../clientpositive/druid/druidmini_joins.q.out  |  224 
 .../results/clientpositive/druid_basic2.q.out   |  944 
 .../hive/metastore/utils/MetaStoreUtils.java|5 +-
 6 files changed, 1341 insertions(+), 945 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/e5f7714a/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 31a0749..00ecd58 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1681,6 +1681,8 @@ spark.perf.disabled.query.files=query14.q,\
   query64.q
 
 druid.query.files=druidmini_test1.q,\
+  druid_basic2.q,\
+  druidmini_joins.q,\
   druidmini_test_insert.q,\
   druidmini_mv.q,\
   druid_timestamptz.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/e5f7714a/ql/src/test/queries/clientpositive/druidmini_joins.q
--
diff --git a/ql/src/test/queries/clientpositive/druidmini_joins.q 
b/ql/src/test/queries/clientpositive/druidmini_joins.q
new file mode 100644
index 000..720127e
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/druidmini_joins.q
@@ -0,0 +1,60 @@
+SET hive.vectorized.execution.enabled=false;
+SET hive.explain.user=false;
+
+--SET hive.execution.mode=llap;
+
+DROP TABLE druid_table_with_nulls;
+
+CREATE TABLE druid_table_with_nulls
+STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
+TBLPROPERTIES ("druid.segment.granularity" = "HOUR")
+AS
+SELECT cast(current_timestamp() AS timestamp with local time zone) AS `__time`,
+   cast(username AS string) AS username,
+   cast(double1 AS double) AS double1,
+   cast(int1 AS int) AS int1
+FROM TABLE (
+  VALUES
+('alfred', 10.30, 2),
+('bob', 3.14, null),
+('bonnie', null, 3),
+('calvin', null, null),
+('charlie', 9.8, 1),
+('charlie', 15.8, 1)) as q (username, double1, int1);
+
+EXPLAIN SELECT
+username AS `username`,
+SUM(double1) AS `sum_double1`
+FROM
+druid_table_with_nulls `tbl1`
+  JOIN (
+SELECT
+username AS `username`,
+SUM(double1) AS `sum_double2`
+FROM druid_table_with_nulls
+GROUP BY `username`
+ORDER BY `sum_double2`
+DESC  LIMIT 10
+  )
+  `tbl2`
+ON (`tbl1`.`username` = `tbl2`.`username`)
+GROUP BY `tbl1`.`username`;
+
+
+SELECT
+username AS `username`,
+SUM(double1) AS `sum_double1`
+FROM
+druid_table_with_nulls `tbl1`
+  JOIN (
+SELECT
+username AS `username`,
+SUM(double1) AS `sum_double2`
+FROM druid_table_with_nulls
+GROUP BY `username`
+ORDER BY `sum_double2`
+DESC  LIMIT 10
+  )
+  `tbl2`
+ON (`tbl1`.`username` = `tbl2`.`username`)
+GROUP BY `tbl1`.`username`;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/e5f7714a/ql/src/test/results/clientpositive/druid/druid_basic2.q.out
--
diff --git a/ql/src/test/results/clientpositive/druid/druid_basic2.q.out 
b/ql/src/test/results/clientpositive/druid/druid_basic2.q.out
new file mode 100644
index 000..88916b9
--- /dev/null
+++ b/ql/src/test/results/clientpositive/druid/druid_basic2.q.out
@@ -0,0 +1,1051 @@
+PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@druid_table_1_n2
+POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2
+STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
+TBLPROPERTIES ("druid.datasource" = "wikipedia")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@druid_table_1_n2
+PREHOOK: query: DESCRIBE FORMATTED druid_table_1_n2
+PREHOOK: type: DESCTABLE

[1/2] hive git commit: HIVE-19762 : Druid Queries containing Joins gives wrong results (Nishant Bangarwa via Ashutosh Chauhan)

2018-06-04 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 91cdd4f4c -> 0ebf04c87


http://git-wip-us.apache.org/repos/asf/hive/blob/0ebf04c8/ql/src/test/results/clientpositive/druid_basic2.q.out
--
diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out 
b/ql/src/test/results/clientpositive/druid_basic2.q.out
deleted file mode 100644
index 8c22c94..000
--- a/ql/src/test/results/clientpositive/druid_basic2.q.out
+++ /dev/null
@@ -1,944 +0,0 @@
-PREHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2
-STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
-TBLPROPERTIES ("druid.datasource" = "wikipedia")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@druid_table_1_n2
-POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_1_n2
-STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler'
-TBLPROPERTIES ("druid.datasource" = "wikipedia")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@druid_table_1_n2
-PREHOOK: query: DESCRIBE FORMATTED druid_table_1_n2
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@druid_table_1_n2
-POSTHOOK: query: DESCRIBE FORMATTED druid_table_1_n2
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@druid_table_1_n2
-# col_name data_type   comment 
-__time timestamp with local time zone  from deserializer   
-robot  string  from deserializer   
-namespace  string  from deserializer   
-anonymous  string  from deserializer   
-unpatrolledstring  from deserializer   
-page   string  from deserializer   
-language   string  from deserializer   
-newpagestring  from deserializer   
-user   string  from deserializer   
-count  float   from deserializer   
-added  float   from deserializer   
-delta  float   from deserializer   
-variation  float   from deserializer   
-deletedfloat   from deserializer   
-
-# Detailed Table Information
-Database:  default  
- A masked pattern was here 
-Retention: 0
- A masked pattern was here 
-Table Type:EXTERNAL_TABLE   
-Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"__time\":\"true\",\"added\":\"true\",\"anonymous\":\"true\",\"count\":\"true\",\"deleted\":\"true\",\"delta\":\"true\",\"language\":\"true\",\"namespace\":\"true\",\"newpage\":\"true\",\"page\":\"true\",\"robot\":\"true\",\"unpatrolled\":\"true\",\"user\":\"true\",\"variation\":\"true\"}}
-   EXTERNALTRUE
-   bucketing_version   2   
-   druid.datasourcewikipedia   
-   numFiles0   
-   numRows 0   
-   rawDataSize 0   
-   storage_handler 
org.apache.hadoop.hive.druid.QTestDruidStorageHandler
-   totalSize   0   
- A masked pattern was here 
-
-# Storage Information   
-SerDe Library: org.apache.hadoop.hive.druid.QTestDruidSerDe 
-InputFormat:   null 
-OutputFormat:  null 
-Compressed:No   
-Num Buckets:   -1   
-Bucket Columns:[]   
-Sort Columns:  []   
-Storage Desc Params:
-   serialization.format1   
-PREHOOK: query: EXPLAIN EXTENDED
-SELECT robot FROM druid_table_1_n2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
-SELECT robot FROM druid_table_1_n2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-Fetch Operator
-  limit: -1
-  Processor Tree:
-TableScan
-  alias: druid_table_1_n2
-  properties:
-druid.fieldNames robot
-druid.fieldTypes string
-druid.query.json 
{"queryType":"scan","dataSource":"wikipedia","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"columns":["robot"],"resultFormat":"compactedList"}
-druid.query.type scan
-  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column 
stats: NONE
-  GatherStats: false
-  Select Operator
-expressions: robot (type: string)
-outputColumnNa

hive git commit: HIVE-19334: Use actual file size rather than stats for fetch task optimization with external tables (Jason Dere, reviewed by GopalV)

2018-06-04 Thread jdere
Repository: hive
Updated Branches:
  refs/heads/branch-3 8e90059ab -> a4f87134b


HIVE-19334: Use actual file size rather than stats for fetch task optimization 
with external tables (Jason Dere, reviewed by GopalV)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a4f87134
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a4f87134
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a4f87134

Branch: refs/heads/branch-3
Commit: a4f87134bd015bfce0c58b6bec2ccdab4fddb4d8
Parents: 8e90059
Author: Jason Dere 
Authored: Mon Jun 4 17:28:37 2018 -0700
Committer: Jason Dere 
Committed: Mon Jun 4 17:29:41 2018 -0700

--
 .../hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java | 13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a4f87134/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
index 6b46188..ffd47a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
@@ -43,6 +43,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.CommonJoinOperator;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
@@ -523,6 +524,7 @@ public class SimpleFetchOptimizer extends Transform {
 // scanning the filesystem to get file lengths.
 private Status checkThresholdWithMetastoreStats(final Table table, final 
PrunedPartitionList partsList,
   final long threshold) {
+  Status status = Status.UNAVAILABLE;
   if (table != null && !table.isPartitioned()) {
 long dataSize = StatsUtils.getTotalSize(table);
 if (dataSize <= 0) {
@@ -530,7 +532,7 @@ public class SimpleFetchOptimizer extends Transform {
   return Status.UNAVAILABLE;
 }
 
-return (threshold - dataSize) >= 0 ? Status.PASS : Status.FAIL;
+status = (threshold - dataSize) >= 0 ? Status.PASS : Status.FAIL;
   } else if (table != null && table.isPartitioned() && partsList != null) {
 List<Long> dataSizes = StatsUtils.getBasicStatForPartitions(table, 
partsList.getNotDeniedPartns(),
   StatsSetupConst.TOTAL_SIZE);
@@ -541,10 +543,15 @@ public class SimpleFetchOptimizer extends Transform {
   return Status.UNAVAILABLE;
 }
 
-return (threshold - totalDataSize) >= 0 ? Status.PASS : Status.FAIL;
+status = (threshold - totalDataSize) >= 0 ? Status.PASS : Status.FAIL;
   }
 
-  return Status.UNAVAILABLE;
+  if (status == Status.PASS && 
MetaStoreUtils.isExternalTable(table.getTTable())) {
+// External table should also check the underlying file size.
+LOG.warn("Table {} is external table, falling back to filesystem 
scan.", table.getCompleteName());
+status = Status.UNAVAILABLE;
+  }
+  return status;
 }
 
 private long getPathLength(JobConf conf, Path path,
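The shape of the change is easier to see outside the diff: the stats-based threshold check can still PASS, but for an external table the result is demoted to UNAVAILABLE so the caller falls through to measuring real file lengths. A condensed sketch of that pattern, with the metastore lookup stubbed out; names only loosely mirror the patch:

public class FetchThresholdSketch {
  enum Status { PASS, FAIL, UNAVAILABLE }

  static Status checkThreshold(long threshold, Long statsDataSize, boolean externalTable) {
    Status status = Status.UNAVAILABLE;
    if (statsDataSize != null && statsDataSize > 0) {
      status = threshold - statsDataSize >= 0 ? Status.PASS : Status.FAIL;
    }
    // External table files can change behind the metastore's back, so a
    // stats-based PASS is not trusted; fall back to a filesystem scan.
    if (status == Status.PASS && externalTable) {
      status = Status.UNAVAILABLE;
    }
    return status;
  }

  public static void main(String[] args) {
    System.out.println(checkThreshold(1024, 512L, false)); // PASS
    System.out.println(checkThreshold(1024, 512L, true));  // UNAVAILABLE
  }
}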



hive git commit: HIVE-19334: Use actual file size rather than stats for fetch task optimization with external tables (Jason Dere, reviewed by GopalV)

2018-06-04 Thread jdere
Repository: hive
Updated Branches:
  refs/heads/master 85ac54403 -> 91cdd4f4c


HIVE-19334: Use actual file size rather than stats for fetch task optimization 
with external tables (Jason Dere, reviewed by GopalV)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/91cdd4f4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/91cdd4f4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/91cdd4f4

Branch: refs/heads/master
Commit: 91cdd4f4cf664a1a758e69c8c403d46dc36c076a
Parents: 85ac544
Author: Jason Dere 
Authored: Mon Jun 4 17:28:37 2018 -0700
Committer: Jason Dere 
Committed: Mon Jun 4 17:28:37 2018 -0700

--
 .../hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java | 13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/91cdd4f4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
index 6b46188..ffd47a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java
@@ -43,6 +43,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.exec.CommonJoinOperator;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
@@ -523,6 +524,7 @@ public class SimpleFetchOptimizer extends Transform {
 // scanning the filesystem to get file lengths.
 private Status checkThresholdWithMetastoreStats(final Table table, final 
PrunedPartitionList partsList,
   final long threshold) {
+  Status status = Status.UNAVAILABLE;
   if (table != null && !table.isPartitioned()) {
 long dataSize = StatsUtils.getTotalSize(table);
 if (dataSize <= 0) {
@@ -530,7 +532,7 @@ public class SimpleFetchOptimizer extends Transform {
   return Status.UNAVAILABLE;
 }
 
-return (threshold - dataSize) >= 0 ? Status.PASS : Status.FAIL;
+status = (threshold - dataSize) >= 0 ? Status.PASS : Status.FAIL;
   } else if (table != null && table.isPartitioned() && partsList != null) {
 List<Long> dataSizes = StatsUtils.getBasicStatForPartitions(table, 
partsList.getNotDeniedPartns(),
   StatsSetupConst.TOTAL_SIZE);
@@ -541,10 +543,15 @@ public class SimpleFetchOptimizer extends Transform {
   return Status.UNAVAILABLE;
 }
 
-return (threshold - totalDataSize) >= 0 ? Status.PASS : Status.FAIL;
+status = (threshold - totalDataSize) >= 0 ? Status.PASS : Status.FAIL;
   }
 
-  return Status.UNAVAILABLE;
+  if (status == Status.PASS && 
MetaStoreUtils.isExternalTable(table.getTTable())) {
+// External table should also check the underlying file size.
+LOG.warn("Table {} is external table, falling back to filesystem 
scan.", table.getCompleteName());
+status = Status.UNAVAILABLE;
+  }
+  return status;
 }
 
 private long getPathLength(JobConf conf, Path path,



hive git commit: HIVE-19332: Disable compute.query.using.stats for external table (Jason Dere, reviewed by Ashutosh Chauhan)

2018-06-04 Thread jdere
Repository: hive
Updated Branches:
  refs/heads/branch-3 ab1be561c -> 8e90059ab


HIVE-19332: Disable compute.query.using.stats for external table (Jason Dere, 
reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8e90059a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8e90059a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8e90059a

Branch: refs/heads/branch-3
Commit: 8e90059ab230289268e88aaad4468fb7453ac070
Parents: ab1be56
Author: Jason Dere 
Authored: Mon Jun 4 17:10:01 2018 -0700
Committer: Jason Dere 
Committed: Mon Jun 4 17:10:56 2018 -0700

--
 .../test/resources/testconfiguration.properties |   1 +
 .../hive/ql/optimizer/StatsOptimizer.java   |  17 +-
 .../HiveReduceExpressionsWithStatsRule.java |   7 +-
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |  25 ++
 .../clientpositive/stats_only_external.q|  35 +++
 .../llap/stats_only_external.q.out  | 227 +++
 6 files changed, 301 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8e90059a/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 1181461..31a0749 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -686,6 +686,7 @@ minillaplocal.query.files=\
   special_character_in_tabnames_1.q,\
   sqlmerge.q,\
   stats_based_fetch_decision.q,\
+  stats_only_external.q,\
   subquery_in_having.q,\
   subquery_notin.q,\
   subquery_nested_subquery.q, \

http://git-wip-us.apache.org/repos/asf/hive/blob/8e90059a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 5788d49..857f300 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
+import org.apache.hadoop.hive.ql.stats.StatsUtils;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCount;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMax;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMin;
@@ -447,7 +448,7 @@ public class StatsOptimizer extends Transform {
   String colName = desc.getColumn();
   StatType type = getType(desc.getTypeString());
   if (!tbl.isPartitioned()) {
-if 
(!StatsSetupConst.areBasicStatsUptoDate(tbl.getParameters())) {
+if (!StatsUtils.areBasicStatsUptoDateForQueryAnswering(tbl, 
tbl.getParameters())) {
   Logger.debug("Stats for table : " + tbl.getTableName() + " 
are not up to date.");
   return null;
 }
@@ -456,7 +457,7 @@ public class StatsOptimizer extends Transform {
   Logger.debug("Table doesn't have up to date stats " + 
tbl.getTableName());
   return null;
 }
-if 
(!StatsSetupConst.areColumnStatsUptoDate(tbl.getParameters(), colName)) {
+if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(tbl, 
tbl.getParameters(), colName)) {
   Logger.debug("Stats for table : " + tbl.getTableName() + " 
column " + colName
   + " are not up to date.");
   return null;
@@ -479,7 +480,7 @@ public class StatsOptimizer extends Transform {
 Set<Partition> parts = 
pctx.getPrunedPartitions(tsOp.getConf().getAlias(), tsOp)
 .getPartitions();
 for (Partition part : parts) {
-  if 
(!StatsSetupConst.areBasicStatsUptoDate(part.getParameters())) {
+  if 
(!StatsUtils.areBasicStatsUptoDateForQueryAnswering(part.getTable(), 
part.getParameters())) {
 Logger.debug("Stats for part : " + part.getSpec() + " are 
not up to date.");
 return null;
   }
@@ -517,7 +518,7 @@ public class StatsOptimizer extends Transform {
 String colName = colDesc.getColumn();
 StatType type = getType(colDesc.getTypeString());
 if(!tbl.isPartitioned()) {
-  if (!StatsSetupConst.areColumnStatsUptoDate(tbl.getParameter
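The renamed helpers suggest a single rule: external tables never answer queries from metastore stats, even when the stats are marked accurate, because the underlying files can be modified outside Hive. A condensed sketch of the gate under that assumption; the real helpers in StatsUtils take a Table plus its parameter map:

public class StatsAnswerGateSketch {
  static boolean areBasicStatsUptoDateForQueryAnswering(boolean externalTable, boolean statsAccurate) {
    if (externalTable) {
      return false; // never trust stats for query answering on external tables
    }
    return statsAccurate;
  }

  public static void main(String[] args) {
    System.out.println(areBasicStatsUptoDateForQueryAnswering(false, true)); // true
    System.out.println(areBasicStatsUptoDateForQueryAnswering(true, true));  // false
  }
}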

hive git commit: HIVE-19332: Disable compute.query.using.stats for external table (Jason Dere, reviewed by Ashutosh Chauhan)

2018-06-04 Thread jdere
Repository: hive
Updated Branches:
  refs/heads/master bf70bd270 -> 85ac54403


HIVE-19332: Disable compute.query.using.stats for external table (Jason Dere, 
reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/85ac5440
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/85ac5440
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/85ac5440

Branch: refs/heads/master
Commit: 85ac544039b7620c41dcc0c743b7cb603cefe26a
Parents: bf70bd2
Author: Jason Dere 
Authored: Mon Jun 4 17:10:01 2018 -0700
Committer: Jason Dere 
Committed: Mon Jun 4 17:10:01 2018 -0700

--
 .../test/resources/testconfiguration.properties |   1 +
 .../hive/ql/optimizer/StatsOptimizer.java   |  17 +-
 .../HiveReduceExpressionsWithStatsRule.java |   7 +-
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |  25 ++
 .../clientpositive/stats_only_external.q|  35 +++
 .../llap/stats_only_external.q.out  | 227 +++
 6 files changed, 301 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/85ac5440/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index fa1a4fb..14a93a1 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -670,6 +670,7 @@ minillaplocal.query.files=\
   special_character_in_tabnames_1.q,\
   sqlmerge.q,\
   stats_based_fetch_decision.q,\
+  stats_only_external.q,\
   subquery_in_having.q,\
   subquery_notin.q,\
   subquery_nested_subquery.q, \

http://git-wip-us.apache.org/repos/asf/hive/blob/85ac5440/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 5788d49..857f300 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
+import org.apache.hadoop.hive.ql.stats.StatsUtils;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCount;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMax;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMin;
@@ -447,7 +448,7 @@ public class StatsOptimizer extends Transform {
   String colName = desc.getColumn();
   StatType type = getType(desc.getTypeString());
   if (!tbl.isPartitioned()) {
-if 
(!StatsSetupConst.areBasicStatsUptoDate(tbl.getParameters())) {
+if (!StatsUtils.areBasicStatsUptoDateForQueryAnswering(tbl, 
tbl.getParameters())) {
   Logger.debug("Stats for table : " + tbl.getTableName() + " 
are not up to date.");
   return null;
 }
@@ -456,7 +457,7 @@ public class StatsOptimizer extends Transform {
   Logger.debug("Table doesn't have up to date stats " + 
tbl.getTableName());
   return null;
 }
-if 
(!StatsSetupConst.areColumnStatsUptoDate(tbl.getParameters(), colName)) {
+if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(tbl, 
tbl.getParameters(), colName)) {
   Logger.debug("Stats for table : " + tbl.getTableName() + " 
column " + colName
   + " are not up to date.");
   return null;
@@ -479,7 +480,7 @@ public class StatsOptimizer extends Transform {
 Set<Partition> parts = 
pctx.getPrunedPartitions(tsOp.getConf().getAlias(), tsOp)
 .getPartitions();
 for (Partition part : parts) {
-  if 
(!StatsSetupConst.areBasicStatsUptoDate(part.getParameters())) {
+  if 
(!StatsUtils.areBasicStatsUptoDateForQueryAnswering(part.getTable(), 
part.getParameters())) {
 Logger.debug("Stats for part : " + part.getSpec() + " are 
not up to date.");
 return null;
   }
@@ -517,7 +518,7 @@ public class StatsOptimizer extends Transform {
 String colName = colDesc.getColumn();
 StatType type = getType(colDesc.getTypeString());
 if(!tbl.isPartitioned()) {
-  if (!StatsSetupConst.areColumnStatsUptoDate(tbl.getParameters(),

hive git commit: HIVE-19096: query result cache interferes with explain analyze (Jason Dere, reviewed by Zoltan Haindrich)

2018-06-04 Thread jdere
Repository: hive
Updated Branches:
  refs/heads/branch-3 ac5de3d45 -> ab1be561c


HIVE-19096: query result cache interferes with explain analyze (Jason Dere, 
reviewed by Zoltan Haindrich)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ab1be561
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ab1be561
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ab1be561

Branch: refs/heads/branch-3
Commit: ab1be561cda65b8272dc70d6d4823f4a8626e31a
Parents: ac5de3d
Author: Jason Dere 
Authored: Mon Jun 4 16:04:09 2018 -0700
Committer: Jason Dere 
Committed: Mon Jun 4 16:05:12 2018 -0700

--
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  5 ++
 .../queries/clientpositive/results_cache_2.q|  8 ++
 .../clientpositive/llap/results_cache_2.q.out   | 84 
 3 files changed, 97 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ab1be561/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 506dc39..f3eff0f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -14683,6 +14683,11 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
   return false;
 }
 
+// HIVE-19096 - disable for explain analyze
+if (ctx.getExplainAnalyze() != null) {
+  return false;
+}
+
 return true;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab1be561/ql/src/test/queries/clientpositive/results_cache_2.q
--
diff --git a/ql/src/test/queries/clientpositive/results_cache_2.q 
b/ql/src/test/queries/clientpositive/results_cache_2.q
index 034ec18..b5b0958 100644
--- a/ql/src/test/queries/clientpositive/results_cache_2.q
+++ b/ql/src/test/queries/clientpositive/results_cache_2.q
@@ -40,3 +40,11 @@ explain
 select c1, count(*)
 from (select current_timestamp c1, value from src where key < 10) q
 group by c1;
+
+-- Test 4: cache disabled for explain analyze
+set test.comment=EXPLAIN ANALYZE should not use the cache. This query just 
previously used the cache in Test 2;
+set test.comment;
+explain analyze
+select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1;
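The eligibility check and the new Test 4 fit together: EXPLAIN ANALYZE re-runs the query to collect actual row counts (the estimated/actual pairs in the q.out below), and a results-cache hit would leave it nothing to measure. A condensed sketch of the gate's shape; the names are illustrative, and only the explain-analyze clause comes from the patch:

public class ResultsCacheEligibilitySketch {
  static boolean queryCanUseCache(boolean isDeterministic, boolean explainAnalyze) {
    if (!isDeterministic) {
      return false; // e.g. nondeterministic current_timestamp queries
    }
    if (explainAnalyze) {
      return false; // HIVE-19096: explain analyze must actually execute
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(queryCanUseCache(true, false)); // true: may serve from cache
    System.out.println(queryCanUseCache(true, true));  // false: bypass the cache
  }
}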

http://git-wip-us.apache.org/repos/asf/hive/blob/ab1be561/ql/src/test/results/clientpositive/llap/results_cache_2.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/results_cache_2.q.out 
b/ql/src/test/results/clientpositive/llap/results_cache_2.q.out
index a1b2485..25b8dc3 100644
--- a/ql/src/test/results/clientpositive/llap/results_cache_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/results_cache_2.q.out
@@ -178,3 +178,87 @@ STAGE PLANS:
   Processor Tree:
 ListSink
 
+test.comment=EXPLAIN ANALYZE should not use the cache. This query just 
previously used the cache in Test 2
+PREHOOK: query: select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+ A masked pattern was here 
+POSTHOOK: query: select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+ A masked pattern was here 
+PREHOOK: query: explain analyze
+select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain analyze
+select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+Tez
+ A masked pattern was here 
+  Edges:
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ A masked pattern was here 
+  Vertices:
+Map 1 
+Map Operator Tree:
+TableScan
+  alias: src
+  Statistics: Num rows: 500/500 Data size: 89000 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Filter Operator
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
+Statistics: Num rows: 166/10 Data size: 29548 Basic stats: 
COMPLETE Column stats: COMPLETE
+Select Operator
+  expressions: sign(value) (type: double)
+  outputColumnNames: _col0
+  Statistics: Num rows:

hive git commit: HIVE-19096: query result cache interferes with explain analyze (Jason Dere, reviewed by Zoltan Haindrich)

2018-06-04 Thread jdere
Repository: hive
Updated Branches:
  refs/heads/master 5667af34c -> bf70bd270


HIVE-19096: query result cache interferes with explain analyze (Jason Dere, 
reviewed by Zoltan Haindrich)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bf70bd27
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bf70bd27
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bf70bd27

Branch: refs/heads/master
Commit: bf70bd27047a186e4360226ac65782a735a7929d
Parents: 5667af3
Author: Jason Dere 
Authored: Mon Jun 4 16:04:09 2018 -0700
Committer: Jason Dere 
Committed: Mon Jun 4 16:04:09 2018 -0700

--
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  5 ++
 .../queries/clientpositive/results_cache_2.q|  8 ++
 .../clientpositive/llap/results_cache_2.q.out   | 84 
 3 files changed, 97 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/bf70bd27/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 506dc39..f3eff0f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -14683,6 +14683,11 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
   return false;
 }
 
+// HIVE-19096 - disable for explain analyze
+if (ctx.getExplainAnalyze() != null) {
+  return false;
+}
+
 return true;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/bf70bd27/ql/src/test/queries/clientpositive/results_cache_2.q
--
diff --git a/ql/src/test/queries/clientpositive/results_cache_2.q 
b/ql/src/test/queries/clientpositive/results_cache_2.q
index d939b8e..bc8965a 100644
--- a/ql/src/test/queries/clientpositive/results_cache_2.q
+++ b/ql/src/test/queries/clientpositive/results_cache_2.q
@@ -41,3 +41,11 @@ explain
 select c1, count(*)
 from (select current_timestamp c1, value from src where key < 10) q
 group by c1;
+
+-- Test 4: cache disabled for explain analyze
+set test.comment=EXPLAIN ANALYZE should not use the cache. This query just 
previously used the cache in Test 2;
+set test.comment;
+explain analyze
+select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1;

http://git-wip-us.apache.org/repos/asf/hive/blob/bf70bd27/ql/src/test/results/clientpositive/llap/results_cache_2.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/results_cache_2.q.out 
b/ql/src/test/results/clientpositive/llap/results_cache_2.q.out
index a1b2485..25b8dc3 100644
--- a/ql/src/test/results/clientpositive/llap/results_cache_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/results_cache_2.q.out
@@ -178,3 +178,87 @@ STAGE PLANS:
   Processor Tree:
 ListSink
 
+test.comment=EXPLAIN ANALYZE should not use the cache. This query just 
previously used the cache in Test 2
+PREHOOK: query: select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+PREHOOK: query: explain analyze
+select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain analyze
+select c1, count(*)
+from (select sign(value) c1, value from src where key < 10) q
+group by c1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+Tez
+#### A masked pattern was here ####
+  Edges:
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+  Vertices:
+Map 1 
+Map Operator Tree:
+TableScan
+  alias: src
+  Statistics: Num rows: 500/500 Data size: 89000 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Filter Operator
+predicate: (UDFToDouble(key) < 10.0D) (type: boolean)
+Statistics: Num rows: 166/10 Data size: 29548 Basic stats: 
COMPLETE Column stats: COMPLETE
+Select Operator
+  expressions: sign(value) (type: double)
+  outputColumnNames: _col0
+  Statistics: Num rows: 166
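
The fix itself is the small guard in the SemanticAnalyzer hunk above: EXPLAIN ANALYZE re-executes the query to collect runtime row counts, so serving it from the results cache would report stale or empty statistics. A minimal sketch of the pattern, where ExplainContext is a hypothetical stand-in for Hive's real query Context:

    // Sketch only; ExplainContext is a stand-in, not Hive's actual Context API.
    interface ExplainContext {
      Object getExplainAnalyze(); // non-null while EXPLAIN ANALYZE is active
    }

    final class ResultsCacheGate {
      // Mirrors the HIVE-19096 guard: never serve EXPLAIN ANALYZE from cache.
      static boolean canUseResultsCache(ExplainContext ctx) {
        if (ctx.getExplainAnalyze() != null) {
          return false; // must run for real to gather runtime statistics
        }
        return true;
      }
    }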

hive git commit: HIVE-19467: Make storage format configurable for temp tables created using LLAP external client (Jason Dere, reviewed by Deepak Jaiswal)

2018-06-04 Thread jdere
Repository: hive
Updated Branches:
  refs/heads/branch-3 5ec8e356d -> ac5de3d45


HIVE-19467: Make storage format configurable for temp tables created using LLAP 
external client (Jason Dere, reviewed by Deepak Jaiswal)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ac5de3d4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ac5de3d4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ac5de3d4

Branch: refs/heads/branch-3
Commit: ac5de3d45ce33ed61a00f5a2f3637cf755d3ad82
Parents: 5ec8e35
Author: Jason Dere 
Authored: Wed May 9 18:05:50 2018 -0700
Committer: Jason Dere 
Committed: Mon Jun 4 15:30:56 2018 -0700

--
 .../java/org/apache/hadoop/hive/conf/HiveConf.java   |  3 +++
 .../org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java   | 13 +
 .../hive/ql/udf/generic/GenericUDTFGetSplits.java| 15 ++-
 3 files changed, 30 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ac5de3d4/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 8347f7f..6939dd0 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -4149,6 +4149,9 @@ public class HiveConf extends Configuration {
 
LLAP_DAEMON_OUTPUT_SERVICE_MAX_PENDING_WRITES("hive.llap.daemon.output.service.max.pending.writes",
 8, "Maximum number of queued writes allowed per connection when 
sending data\n" +
 " via the LLAP output service to external clients."),
+
LLAP_EXTERNAL_SPLITS_TEMP_TABLE_STORAGE_FORMAT("hive.llap.external.splits.temp.table.storage.format",
+"orc", new StringSet("default", "text", "orc"),
+"Storage format for temp tables created using LLAP external client"),
 LLAP_ENABLE_GRACE_JOIN_IN_LLAP("hive.llap.enable.grace.join.in.llap", 
false,
 "Override if grace join should be allowed to run in llap."),
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ac5de3d4/itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java
index 11017f6..7a891ef 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java
@@ -448,6 +448,19 @@ public abstract class BaseJdbcWithMiniLlap {
 assertArrayEquals("X'01FF'".getBytes("UTF-8"), (byte[]) rowValues[22]);
   }
 
+
+  @Test(timeout = 6)
+  public void testComplexQuery() throws Exception {
+createTestTable("testtab1");
+
+RowCollector rowCollector = new RowCollector();
+String query = "select value, count(*) from testtab1 where under_col=0 
group by value";
+int rowCount = processQuery(query, 1, rowCollector);
+assertEquals(1, rowCount);
+
+assertArrayEquals(new String[] {"val_0", "3"}, rowCollector.rows.get(0));
+  }
+
   private interface RowProcessor {
 void process(Row row);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ac5de3d4/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
index 7dbde7a..20d0961 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
@@ -293,7 +293,8 @@ public class GenericUDTFGetSplits extends GenericUDTF {
 
 String tableName = 
"table_"+UUID.randomUUID().toString().replaceAll("[^A-Za-z0-9 ]", "");
 
-String ctas = "create temporary table " + tableName + " as " + query;
+String storageFormatString = getTempTableStorageFormatString(conf);
+String ctas = "create temporary table " + tableName + " " + 
storageFormatString + " as " + query;
 LOG.info("Materializing the query for LLAPIF; CTAS: " + ctas);
 driver.releaseResources();
 HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, originalMode);
@@ -674,6 +675,18 @@ public class GenericUDTFGetSplits extends GenericUDTF {
 return Schema;
   }
 
+  private String getTempTableStorageFormatString(HiveConf conf) {
+String formatString = "";
+String storageFormatOption =
+
conf.g
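
The message is cut off inside getTempTableStorageFormatString, but the new HiveConf entry above pins down its contract: map the configured value ("default", "text", or "orc") to a storage clause for the generated CTAS. A hedged reconstruction under that assumption; the method exists in the patch, but the body below is a guess, not the committed code:

    // Assumed reconstruction: translate the config value into a CTAS clause.
    static String storageClauseFor(String option) {
      switch (option.toLowerCase()) {
        case "text":
          return "stored as textfile";
        case "orc":
          return "stored as orc";
        default:
          return ""; // "default": leave it to Hive's normal table defaults
      }
    }

Spliced into the CTAS exactly as the diff shows: "create temporary table " + tableName + " " + clause + " as " + query.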

[1/4] hive git commit: HIVE-19690 : multi-insert query with multiple GBY, and distinct in only some branches can produce incorrect results (Sergey Shelukhin, reviewed by Ashutosh Chauhan)

2018-06-04 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/branch-3 c2cc42c1c -> 5ec8e356d
  refs/heads/master 43e331e35 -> 5667af34c


http://git-wip-us.apache.org/repos/asf/hive/blob/5667af34/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out 
b/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
index 9c4cdec..113ff46 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
@@ -47,10 +47,11 @@ STAGE PLANS:
   Stage: Stage-2
 Spark
   Edges:
-Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 2)
+Reducer 2 <- Map 4 (GROUP PARTITION-LEVEL SORT, 2)
+Reducer 3 <- Map 5 (GROUP, 2)
 #### A masked pattern was here ####
   Vertices:
-Map 1 
+Map 4 
 Map Operator Tree:
 TableScan
   alias: src
@@ -59,53 +60,84 @@ STAGE PLANS:
 expressions: key (type: string), UDFToDouble(key) (type: 
double), value (type: string)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string), _col2 (type: 
string)
-  sort order: ++
-  Map-reduce partition columns: _col0 (type: string)
+Select Operator
+  expressions: _col0 (type: string), _col2 (type: string)
+  outputColumnNames: _col0, _col2
   Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  value expressions: _col1 (type: double)
+  Group By Operator
+aggregations: count(DISTINCT _col2)
+keys: _col0 (type: string), _col2 (type: string)
+mode: hash
+outputColumnNames: _col0, _col1, _col2
+Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  key expressions: _col0 (type: string), _col1 (type: 
string)
+  sort order: ++
+  Map-reduce partition columns: _col0 (type: string)
+  Statistics: Num rows: 500 Data size: 5312 Basic 
stats: COMPLETE Column stats: NONE
+Map 5 
+Map Operator Tree:
+TableScan
+  alias: src
+  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+  Select Operator
+expressions: key (type: string), UDFToDouble(key) (type: 
double), value (type: string)
+outputColumnNames: _col0, _col1, _col2
+Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+Group By Operator
+  aggregations: sum(_col1)
+  keys: _col0 (type: string), _col2 (type: string)
+  mode: hash
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+  Reduce Output Operator
+key expressions: _col0 (type: string), _col1 (type: 
string)
+sort order: ++
+Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
+Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+value expressions: _col2 (type: double)
 Execution mode: vectorized
 Reducer 2 
 Reduce Operator Tree:
-  Forward
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Group By Operator
-  aggregations: count(DISTINCT KEY._col1:0._col0)
-  keys: KEY._col0 (type: string)
-  mode: complete
+  Group By Operator
+aggregations: count(DISTINCT KEY._col1:0._col0)
+keys: KEY._col0 (type: string)
+mode: mergepartial
+outputColumnNames: _col0, _col1
+Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
+Select Operator
+  expressions: _col0 (type: string), UDFToDouble(_col1) (type: 
double)
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COM

[2/4] hive git commit: HIVE-19690 : multi-insert query with multiple GBY, and distinct in only some branches can produce incorrect results (Sergey Shelukhin, reviewed by Ashutosh Chauhan)

2018-06-04 Thread sershe
HIVE-19690 : multi-insert query with multiple GBY, and distinct in only some 
branches can produce incorrect results (Sergey Shelukhin, reviewed by Ashutosh 
Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5667af34
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5667af34
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5667af34

Branch: refs/heads/master
Commit: 5667af34c4cf36cc41aaf39a8e42f4dad42d2cee
Parents: 43e331e
Author: sergey 
Authored: Mon Jun 4 14:42:06 2018 -0700
Committer: sergey 
Committed: Mon Jun 4 14:42:06 2018 -0700

--
 .../hadoop/hive/ql/exec/KeyWrapperFactory.java  |  12 +
 .../apache/hadoop/hive/ql/exec/MapOperator.java |   5 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  73 ++-
 .../clientpositive/multi_insert_distinct.q  |  66 ++
 .../clientpositive/multi_insert_distinct.q.out  | 534 +
 .../clientpositive/multi_insert_gby3.q.out  | 597 +++
 .../spark/multi_insert_gby3.q.out   | 344 +++
 7 files changed, 1228 insertions(+), 403 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/5667af34/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
index 3c7f0b7..71ee25d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
+import java.util.Arrays;
+
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde2.objectinspector.ListObjectsEqualComparer;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -65,6 +67,11 @@ public class KeyWrapperFactory {
   class ListKeyWrapper extends KeyWrapper {
 int hashcode = -1;
 Object[] keys;
+@Override
+public String toString() {
+  return "ListKeyWrapper [keys=" + Arrays.toString(keys) + "]";
+}
+
 // decide whether this is already in hashmap (keys in hashmap are 
deepcopied
 // version, and we need to use 'currentKeyObjectInspector').
 ListObjectsEqualComparer equalComparer;
@@ -165,6 +172,11 @@ public class KeyWrapperFactory {
   transient StringObjectInspector soi_new, soi_copy;
 
   class TextKeyWrapper extends KeyWrapper {
+@Override
+public String toString() {
+  return "TextKeyWrapper [key=" + key + "]";
+}
+
 int hashcode;
 Object key;
 boolean isCopy;

http://git-wip-us.apache.org/repos/asf/hive/blob/5667af34/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
index 29f3579..16d7c51 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
@@ -90,6 +90,11 @@ public class MapOperator extends AbstractMapOperator {
 
   protected static class MapOpCtx {
 
+@Override
+public String toString() {
+  return "[alias=" + alias + ", op=" + op + "]";
+}
+
 final String alias;
 final Operator op;
 final PartitionDesc partDesc;

http://git-wip-us.apache.org/repos/asf/hive/blob/5667af34/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 66f4b67..506dc39 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -10213,62 +10213,62 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
 
 List> inputOperators =
 new ArrayList>(ks.size());
-List> sprayKeyLists = new 
ArrayList>(ks.size());
-List> distinctKeyLists = new 
ArrayList>(ks.size());
+// We will try to combine multiple clauses into a smaller number with 
compatible keys.
+List> newSprayKeyLists = new 
ArrayList>(ks.size());
+List> newDistinctKeyLists = new 
ArrayList>(ks.size());
 
 // Iterate over each clause
 for (String dest : ks) {
   Operator input = inputs.get(dest);
   RowResolver inputRR = opParseCtx.get(input).getRowResolver();
 
-  List distinctKeys = getDistinctExprs(qbp, dest, inputRR);
-  List sprayKeys = new ArrayList();
+  // Determine the keys for the current cla
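
The new comment above ("combine multiple clauses into a smaller number with compatible keys") is the heart of the fix: insert branches whose spray and distinct keys line up may share one reduce pipeline, while a branch that is alone in using DISTINCT must be split out, which is what the regenerated plans in this patch show. A deliberately simplified sketch that treats only exactly-equal key lists as shareable; the real SemanticAnalyzer check is more permissive:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    final class ClauseGrouper {
      // Group insert branches by their key-list signature; each group can be
      // served by one reduce-side pipeline, separate groups get their own.
      static Map<List<String>, List<String>> group(Map<String, List<String>> keysByDest) {
        Map<List<String>, List<String>> groups = new HashMap<>();
        for (Map.Entry<String, List<String>> e : keysByDest.entrySet()) {
          groups.computeIfAbsent(e.getValue(), k -> new ArrayList<>()).add(e.getKey());
        }
        return groups;
      }
    }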

[3/4] hive git commit: HIVE-19690 : multi-insert query with multiple GBY, and distinct in only some branches can produce incorrect results (Sergey Shelukhin, reviewed by Ashutosh Chauhan)

2018-06-04 Thread sershe
http://git-wip-us.apache.org/repos/asf/hive/blob/5ec8e356/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out 
b/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
index 9c4cdec..113ff46 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
@@ -47,10 +47,11 @@ STAGE PLANS:
   Stage: Stage-2
 Spark
   Edges:
-Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 2)
+Reducer 2 <- Map 4 (GROUP PARTITION-LEVEL SORT, 2)
+Reducer 3 <- Map 5 (GROUP, 2)
 #### A masked pattern was here ####
   Vertices:
-Map 1 
+Map 4 
 Map Operator Tree:
 TableScan
   alias: src
@@ -59,53 +60,84 @@ STAGE PLANS:
 expressions: key (type: string), UDFToDouble(key) (type: 
double), value (type: string)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string), _col2 (type: 
string)
-  sort order: ++
-  Map-reduce partition columns: _col0 (type: string)
+Select Operator
+  expressions: _col0 (type: string), _col2 (type: string)
+  outputColumnNames: _col0, _col2
   Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  value expressions: _col1 (type: double)
+  Group By Operator
+aggregations: count(DISTINCT _col2)
+keys: _col0 (type: string), _col2 (type: string)
+mode: hash
+outputColumnNames: _col0, _col1, _col2
+Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  key expressions: _col0 (type: string), _col1 (type: 
string)
+  sort order: ++
+  Map-reduce partition columns: _col0 (type: string)
+  Statistics: Num rows: 500 Data size: 5312 Basic 
stats: COMPLETE Column stats: NONE
+Map 5 
+Map Operator Tree:
+TableScan
+  alias: src
+  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+  Select Operator
+expressions: key (type: string), UDFToDouble(key) (type: 
double), value (type: string)
+outputColumnNames: _col0, _col1, _col2
+Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+Group By Operator
+  aggregations: sum(_col1)
+  keys: _col0 (type: string), _col2 (type: string)
+  mode: hash
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+  Reduce Output Operator
+key expressions: _col0 (type: string), _col1 (type: 
string)
+sort order: ++
+Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string)
+Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+value expressions: _col2 (type: double)
 Execution mode: vectorized
 Reducer 2 
 Reduce Operator Tree:
-  Forward
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Group By Operator
-  aggregations: count(DISTINCT KEY._col1:0._col0)
-  keys: KEY._col0 (type: string)
-  mode: complete
+  Group By Operator
+aggregations: count(DISTINCT KEY._col1:0._col0)
+keys: KEY._col0 (type: string)
+mode: mergepartial
+outputColumnNames: _col0, _col1
+Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
+Select Operator
+  expressions: _col0 (type: string), UDFToDouble(_col1) (type: 
double)
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE
-  Select Operator
-expressions: _col0 (type: string), UDFToDoub

[4/4] hive git commit: HIVE-19690 : multi-insert query with multiple GBY, and distinct in only some branches can produce incorrect results (Sergey Shelukhin, reviewed by Ashutosh Chauhan)

2018-06-04 Thread sershe
HIVE-19690 : multi-insert query with multiple GBY, and distinct in only some 
branches can produce incorrect results (Sergey Shelukhin, reviewed by Ashutosh 
Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5ec8e356
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5ec8e356
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5ec8e356

Branch: refs/heads/branch-3
Commit: 5ec8e356d88318ebebbd3e7e320eae2b8fd20218
Parents: c2cc42c
Author: sergey 
Authored: Mon Jun 4 14:42:06 2018 -0700
Committer: sergey 
Committed: Mon Jun 4 14:45:05 2018 -0700

--
 .../hadoop/hive/ql/exec/KeyWrapperFactory.java  |  12 +
 .../apache/hadoop/hive/ql/exec/MapOperator.java |   5 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  73 ++-
 .../clientpositive/multi_insert_distinct.q  |  66 ++
 .../clientpositive/multi_insert_distinct.q.out  | 534 +
 .../clientpositive/multi_insert_gby3.q.out  | 597 +++
 .../spark/multi_insert_gby3.q.out   | 344 +++
 7 files changed, 1228 insertions(+), 403 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/5ec8e356/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
index 3c7f0b7..71ee25d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/KeyWrapperFactory.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
+import java.util.Arrays;
+
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde2.objectinspector.ListObjectsEqualComparer;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -65,6 +67,11 @@ public class KeyWrapperFactory {
   class ListKeyWrapper extends KeyWrapper {
 int hashcode = -1;
 Object[] keys;
+@Override
+public String toString() {
+  return "ListKeyWrapper [keys=" + Arrays.toString(keys) + "]";
+}
+
 // decide whether this is already in hashmap (keys in hashmap are 
deepcopied
 // version, and we need to use 'currentKeyObjectInspector').
 ListObjectsEqualComparer equalComparer;
@@ -165,6 +172,11 @@ public class KeyWrapperFactory {
   transient StringObjectInspector soi_new, soi_copy;
 
   class TextKeyWrapper extends KeyWrapper {
+@Override
+public String toString() {
+  return "TextKeyWrapper [key=" + key + "]";
+}
+
 int hashcode;
 Object key;
 boolean isCopy;

http://git-wip-us.apache.org/repos/asf/hive/blob/5ec8e356/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
index 29f3579..16d7c51 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
@@ -90,6 +90,11 @@ public class MapOperator extends AbstractMapOperator {
 
   protected static class MapOpCtx {
 
+@Override
+public String toString() {
+  return "[alias=" + alias + ", op=" + op + "]";
+}
+
 final String alias;
 final Operator op;
 final PartitionDesc partDesc;

http://git-wip-us.apache.org/repos/asf/hive/blob/5ec8e356/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 66f4b67..506dc39 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -10213,62 +10213,62 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
 
 List> inputOperators =
 new ArrayList>(ks.size());
-List> sprayKeyLists = new 
ArrayList>(ks.size());
-List> distinctKeyLists = new 
ArrayList>(ks.size());
+// We will try to combine multiple clauses into a smaller number with 
compatible keys.
+List> newSprayKeyLists = new 
ArrayList>(ks.size());
+List> newDistinctKeyLists = new 
ArrayList>(ks.size());
 
 // Iterate over each clause
 for (String dest : ks) {
   Operator input = inputs.get(dest);
   RowResolver inputRR = opParseCtx.get(input).getRowResolver();
 
-  List distinctKeys = getDistinctExprs(qbp, dest, inputRR);
-  List sprayKeys = new ArrayList();
+  // Determine the keys for the current c

[2/2] hive git commit: HIVE-19597 : TestWorkloadManager sometimes hangs (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

2018-06-04 Thread sershe
HIVE-19597 : TestWorkloadManager sometimes hangs (Sergey Shelukhin, reviewed by 
Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/283e51d2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/283e51d2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/283e51d2

Branch: refs/heads/master
Commit: 283e51d25706bb8f7dfae051d346c16369465329
Parents: 2d3faea
Author: sergey 
Authored: Mon Jun 4 11:27:22 2018 -0700
Committer: sergey 
Committed: Mon Jun 4 13:31:22 2018 -0700

--
 .../hive/ql/exec/tez/WorkloadManager.java   | 23 ++
 .../hive/ql/exec/tez/TestWorkloadManager.java   | 45 +++-
 2 files changed, 40 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/283e51d2/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
index 97ba036..7137a17 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
@@ -339,7 +339,8 @@ public class WorkloadManager extends 
TezSessionPoolSession.AbstractTriggerValida
 private WMFullResourcePlan resourcePlanToApply = null;
 private boolean doClearResourcePlan = false;
 private boolean hasClusterStateChanged = false;
-private SettableFuture testEvent, applyRpFuture;
+private List> testEvents = new LinkedList<>();
+private SettableFuture applyRpFuture;
 private SettableFuture> dumpStateFuture;
 private final List moveSessions = new LinkedList<>();
   }
@@ -401,10 +402,11 @@ public class WorkloadManager extends 
TezSessionPoolSession.AbstractTriggerValida
 return;
   } catch (Exception | AssertionError ex) {
 LOG.error("WM thread encountered an error but will attempt to 
continue", ex);
-if (currentEvents.testEvent != null) {
-  currentEvents.testEvent.setException(ex);
-  currentEvents.testEvent = null;
+for (SettableFuture testEvent : currentEvents.testEvents) {
+  LOG.info("Failing test event " + System.identityHashCode(testEvent));
+  testEvent.setException(ex);
 }
+currentEvents.testEvents.clear();
 if (currentEvents.applyRpFuture != null) {
   currentEvents.applyRpFuture.setException(ex);
   currentEvents.applyRpFuture = null;
@@ -721,12 +723,14 @@ public class WorkloadManager extends 
TezSessionPoolSession.AbstractTriggerValida
   e.dumpStateFuture.set(result);
   e.dumpStateFuture = null;
 }
-
+
 // 15. Notify tests and global async ops.
-if (e.testEvent != null) {
-  e.testEvent.set(true);
-  e.testEvent = null;
+for (SettableFuture testEvent : e.testEvents) {
+  LOG.info("Triggering test event " + System.identityHashCode(testEvent));
+  testEvent.set(null);
 }
+e.testEvents.clear();
+
 if (e.applyRpFuture != null) {
   e.applyRpFuture.set(true);
   e.applyRpFuture = null;
@@ -1552,7 +1556,8 @@ public class WorkloadManager extends 
TezSessionPoolSession.AbstractTriggerValida
 SettableFuture testEvent = SettableFuture.create();
 currentLock.lock();
 try {
-  current.testEvent = testEvent;
+  LOG.info("Adding test event " + System.identityHashCode(testEvent));
+  current.testEvents.add(testEvent);
   notifyWmThreadUnderLock();
 } finally {
   currentLock.unlock();

http://git-wip-us.apache.org/repos/asf/hive/blob/283e51d2/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java 
b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
index 6e15b2c..30ad212 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
@@ -60,7 +60,6 @@ import org.junit.runner.RunWith;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@RunWith(RetryTestRunner.class)
 public class TestWorkloadManager {
   @SuppressWarnings("unused")
   private static final Logger LOG = 
LoggerFactory.getLogger(TestWorkloadManager.class);
@@ -90,8 +89,10 @@ public class TestWorkloadManager {
   if (cdl != null) {
 cdl.countDown();
   }
+  LOG.info("About to call get with " + old);
   try {
session.set((WmTezSession) wm.getSession(old, mappingInput(userName), 
conf));
+   LOG.info("Received " + session.get());
   } catch (Thro

[1/2] hive git commit: HIVE-19663 : refactor LLAP IO report generation (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

2018-06-04 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/master 2d3faead7 -> 43e331e35


HIVE-19663 : refactor LLAP IO report generation (Sergey Shelukhin, reviewed by 
Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/43e331e3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/43e331e3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/43e331e3

Branch: refs/heads/master
Commit: 43e331e35ec17479c204ab6b310e49d68cb4a2e9
Parents: 283e51d
Author: sergey 
Authored: Mon Jun 4 11:28:50 2018 -0700
Committer: sergey 
Committed: Mon Jun 4 13:31:22 2018 -0700

--
 .../hadoop/hive/llap/cache/BuddyAllocator.java  |  9 +--
 .../hive/llap/cache/CacheContentsTracker.java   | 11 
 .../hive/llap/cache/EvictionDispatcher.java | 25 +---
 .../hadoop/hive/llap/cache/LlapIoDebugDump.java | 23 +++
 .../hive/llap/cache/LlapOomDebugDump.java   | 24 
 .../hadoop/hive/llap/cache/LowLevelCache.java   |  2 +-
 .../hive/llap/cache/LowLevelCacheImpl.java  | 28 +
 .../llap/cache/LowLevelCacheMemoryManager.java  | 12 
 .../hive/llap/cache/LowLevelCachePolicy.java|  3 +-
 .../llap/cache/LowLevelFifoCachePolicy.java | 28 -
 .../llap/cache/LowLevelLrfuCachePolicy.java | 18 --
 .../hadoop/hive/llap/cache/MemoryManager.java   |  2 +-
 .../hive/llap/cache/SerDeLowLevelCacheImpl.java | 20 +--
 .../hive/llap/cache/SimpleBufferManager.java| 10 
 .../hive/llap/io/api/impl/LlapIoImpl.java   | 63 ++--
 .../hive/llap/io/metadata/MetadataCache.java| 10 +---
 .../hive/llap/cache/TestBuddyAllocator.java |  9 ---
 .../hive/llap/cache/TestLowLevelCacheImpl.java  |  6 +-
 .../hive/llap/cache/TestOrcMetadataCache.java   | 15 +
 19 files changed, 78 insertions(+), 240 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/43e331e3/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
index f4a549c..fcfc22a 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
 
 public final class BuddyAllocator
-  implements EvictionAwareAllocator, BuddyAllocatorMXBean, LlapOomDebugDump {
+  implements EvictionAwareAllocator, BuddyAllocatorMXBean, LlapIoDebugDump {
   private final Arena[] arenas;
   private final AtomicInteger allocatedArenas = new AtomicInteger(0);
 
@@ -653,7 +653,6 @@ public final class BuddyAllocator
*/
   @Override
   public void debugDumpShort(StringBuilder sb) {
-memoryManager.debugDumpShort(sb);
 sb.append("\nDefrag counters: ");
 for (int i = 0; i < defragCounters.length; ++i) {
   sb.append(defragCounters[i].get()).append(", ");
@@ -1558,12 +1557,6 @@ public final class BuddyAllocator
 return sb.toString();
   }
 
-  @Override
-  public String debugDumpForOom() {
-return "\nALLOCATOR STATE:\n" + debugDumpForOomInternal()
-+ "\nPARENT STATE:\n" + memoryManager.debugDumpForOom();
-  }
-
   private String debugDumpForOomInternal() {
 StringBuilder sb = new StringBuilder();
 for (Arena a : arenas) {

http://git-wip-us.apache.org/repos/asf/hive/blob/43e331e3/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java
index 6a361fa..64c0125 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/CacheContentsTracker.java
@@ -179,11 +179,6 @@ public class CacheContentsTracker implements 
LowLevelCachePolicy, EvictionListen
   }
 
   @Override
-  public void setParentDebugDumper(LlapOomDebugDump dumper) {
-realPolicy.setParentDebugDumper(dumper);
-  }
-
-  @Override
   public long purge() {
 return realPolicy.purge();
   }
@@ -195,11 +190,6 @@ public class CacheContentsTracker implements 
LowLevelCachePolicy, EvictionListen
   }
 
   @Override
-  public String debugDumpForOom() {
-return realPolicy.debugDumpForOom();
-  }
-
-  @Override
   public void debugDumpShort(StringBuilder sb) {
 sb.append("\nCache state: ");
 for (TagState state : tagInfo.values()) {
@@ -209,7 +199
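
The refactor collapses the two old reporting paths (the String-returning debugDumpForOom plus setParentDebugDumper chaining) into the single LlapIoDebugDump interface visible above: every component appends its section to one shared StringBuilder. A hedged sketch of the resulting shape; the coordinator class is illustrative, not the actual LlapIoImpl code:

    import java.util.List;

    // Mirrors the new interface from the diff above.
    interface LlapIoDebugDump {
      void debugDumpShort(StringBuilder sb);
    }

    final class IoDebugReport {
      // One pass over all components replaces the old per-component string
      // building and parent-dumper chaining.
      static String render(List<LlapIoDebugDump> components) {
        StringBuilder sb = new StringBuilder();
        for (LlapIoDebugDump c : components) {
          c.debugDumpShort(sb);
        }
        return sb.toString();
      }
    }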

hive git commit: HIVE-19759: Flaky test: TestRpc#testServerPort (Sahil Takiar, reviewed by Peter Vary)

2018-06-04 Thread stakiar
Repository: hive
Updated Branches:
  refs/heads/master d682ca926 -> 2d3faead7


HIVE-19759: Flaky test: TestRpc#testServerPort (Sahil Takiar, reviewed by Peter 
Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2d3faead
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2d3faead
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2d3faead

Branch: refs/heads/master
Commit: 2d3faead7f15200b21dd21e7320e0f3853878f71
Parents: d682ca9
Author: Sahil Takiar 
Authored: Fri Jun 1 08:15:27 2018 -0500
Committer: Sahil Takiar 
Committed: Mon Jun 4 15:24:19 2018 -0500

--
 .../apache/hadoop/hive/common/ServerUtils.java  |  8 
 .../apache/hive/spark/client/rpc/TestRpc.java   | 39 
 2 files changed, 39 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2d3faead/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java 
b/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java
index 7979bbe..d7f4b14 100644
--- a/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.hive.common;
 
+import java.io.IOException;
 import java.net.InetAddress;
+import java.net.ServerSocket;
 import java.net.UnknownHostException;
 
 import org.slf4j.Logger;
@@ -77,4 +79,10 @@ public class ServerUtils {
 }
   }
 
+  public static int findFreePort() throws IOException {
+ServerSocket socket = new ServerSocket(0);
+int port = socket.getLocalPort();
+socket.close();
+return port;
+  }
 }
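
new ServerSocket(0) asks the OS for an ephemeral port, but nothing reserves that port once the probe socket closes, so another process can take it before RpcServer binds. That window is exactly why the test below retries up to RETRY_ACQUIRE_PORT_COUNT times instead of trusting a single findFreePort call. A self-contained sketch of the probe (the demo class name is made up):

    import java.io.IOException;
    import java.net.ServerSocket;

    public final class FreePortDemo {
      // Same technique as ServerUtils.findFreePort: let the OS pick a port.
      static int findFreePort() throws IOException {
        try (ServerSocket probe = new ServerSocket(0)) {
          return probe.getLocalPort();
        }
      }

      public static void main(String[] args) throws IOException {
        int port = findFreePort();
        // The port is only a candidate: it can be taken again before the
        // real server binds, so callers should loop on bind failures.
        System.out.println("Candidate free port: " + port);
      }
    }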

http://git-wip-us.apache.org/repos/asf/hive/blob/2d3faead/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java
--
diff --git 
a/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java 
b/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java
index 5653e4d..013bcff 100644
--- a/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java
+++ b/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java
@@ -46,6 +46,7 @@ import io.netty.channel.nio.NioEventLoopGroup;
 import io.netty.util.concurrent.Future;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.hive.common.ServerUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -62,6 +63,7 @@ public class TestRpc {
   private Collection closeables;
   private static final Map emptyConfig =
   ImmutableMap.of(HiveConf.ConfVars.SPARK_RPC_CHANNEL_LOG_LEVEL.varname, 
"DEBUG");
+  private static final int RETRY_ACQUIRE_PORT_COUNT = 10;
 
   @Before
   public void setUp() {
@@ -187,10 +189,21 @@ public class TestRpc {
 assertTrue("Port should be within configured port range:" + 
server1.getPort(), server1.getPort() >= 49152 && server1.getPort() <= 49333);
 IOUtils.closeQuietly(server1);
 
-int expectedPort = 65535;
-config.put(HiveConf.ConfVars.SPARK_RPC_SERVER_PORT.varname, 
String.valueOf(expectedPort));
-RpcServer server2 = new RpcServer(config);
-assertTrue("Port should match configured one: " + server2.getPort(), 
server2.getPort() == expectedPort);
+int expectedPort = ServerUtils.findFreePort();
+RpcServer server2 = null;
+for (int i = 0; i < RETRY_ACQUIRE_PORT_COUNT; i++) {
+  try {
+config.put(HiveConf.ConfVars.SPARK_RPC_SERVER_PORT.varname, 
String.valueOf(expectedPort));
+server2 = new RpcServer(config);
+break;
+  } catch (Exception e) {
+LOG.debug("Error while connecting to port " + expectedPort + " 
retrying: " + e.getMessage());
+expectedPort = ServerUtils.findFreePort();
+  }
+}
+
+assertNotNull("Unable to create RpcServer with any attempted port", 
server2);
+assertEquals("Port should match configured one: " + server2.getPort(), 
expectedPort, server2.getPort());
 IOUtils.closeQuietly(server2);
 
 config.put(HiveConf.ConfVars.SPARK_RPC_SERVER_PORT.varname, 
"49552-49222,49223,49224-49333");
@@ -204,10 +217,20 @@ public class TestRpc {
 }
 
 // Retry logic
-expectedPort = 65535;
-config.put(HiveConf.ConfVars.SPARK_RPC_SERVER_PORT.varname, 
String.valueOf(expectedPort) + ",21-23");
-RpcServer server3 = new RpcServer(config);
-assertTrue("Port should match configured one:" + server3.getPort(), 
server3.getPort() == expectedPort);
+expectedPort = ServerUtils.findFreePort();
+RpcServer server3 = null;
+for (int i = 0; i < RETRY_ACQUIRE_PORT_COUNT; i++) {
+  try {
+con

hive git commit: HIVE-18652: Print Spark metrics on console (Sahil Takiar, reviewed by Vihang Karajgaonkar)

2018-06-04 Thread stakiar
Repository: hive
Updated Branches:
  refs/heads/master 2028749b1 -> d682ca926


HIVE-18652: Print Spark metrics on console (Sahil Takiar, reviewed by Vihang 
Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d682ca92
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d682ca92
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d682ca92

Branch: refs/heads/master
Commit: d682ca9266df182e977b35ab47771dbac2ec
Parents: 2028749
Author: Sahil Takiar 
Authored: Mon Jun 4 13:36:04 2018 -0500
Committer: Sahil Takiar 
Committed: Mon Jun 4 13:36:48 2018 -0500

--
 .../hive/ql/exec/spark/TestSparkStatistics.java |  2 +-
 .../hadoop/hive/ql/exec/spark/SparkTask.java| 83 +++-
 .../spark/Statistic/SparkStatisticGroup.java|  4 +
 .../spark/Statistic/SparkStatisticsNames.java   | 25 --
 .../spark/status/impl/SparkMetricsUtils.java| 37 ++---
 .../hive/spark/client/MetricsCollection.java| 20 -
 .../hive/spark/client/metrics/InputMetrics.java | 12 ++-
 .../client/metrics/ShuffleReadMetrics.java  | 21 -
 .../client/metrics/ShuffleWriteMetrics.java | 11 ++-
 .../spark/client/TestMetricsCollection.java | 15 ++--
 10 files changed, 190 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d682ca92/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java
index 4413161..f6c5b17 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkStatistics.java
@@ -81,7 +81,7 @@ public class TestSparkStatistics {
   List sparkStats = 
Lists.newArrayList(sparkTask.getSparkStatistics()
   
.getStatisticGroup(SparkStatisticsNames.SPARK_GROUP_NAME).getStatistics());
 
-  Assert.assertEquals(18, sparkStats.size());
+  Assert.assertEquals(24, sparkStats.size());
 
   Map statsMap = sparkStats.stream().collect(
   Collectors.toMap(SparkStatistic::getName, 
SparkStatistic::getValue));

http://git-wip-us.apache.org/repos/asf/hive/blob/d682ca92/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
index 8038771..ddbb6ba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
@@ -35,6 +35,7 @@ import com.google.common.base.Throwables;
 import org.apache.hadoop.hive.common.metrics.common.Metrics;
 import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.ql.exec.spark.Statistic.SparkStatisticsNames;
+import org.apache.hadoop.hive.ql.exec.spark.status.impl.SparkMetricsUtils;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -161,6 +162,7 @@ public class SparkTask extends Task {
 
   if (rc == 0) {
 sparkStatistics = sparkJobStatus.getSparkStatistics();
+printConsoleMetrics();
 printExcessiveGCWarning();
 if (LOG.isInfoEnabled() && sparkStatistics != null) {
   LOG.info(sparkStatisticsToString(sparkStatistics, sparkJobID));
@@ -222,6 +224,79 @@ public class SparkTask extends Task {
 return rc;
   }
 
+  private void printConsoleMetrics() {
+SparkStatisticGroup sparkStatisticGroup = 
sparkStatistics.getStatisticGroup(
+SparkStatisticsNames.SPARK_GROUP_NAME);
+
+if (sparkStatisticGroup != null) {
+  String colon = ": ";
+  String forwardSlash = " / ";
+  String separator = ", ";
+
+  String metricsString = String.format("Spark Job[%d] Metrics: ", 
sparkJobID);
+
+  // Task Duration Time
+  if 
(sparkStatisticGroup.containsSparkStatistic(SparkStatisticsNames.TASK_DURATION_TIME))
 {
+metricsString += SparkStatisticsNames.TASK_DURATION_TIME + colon +
+SparkMetricsUtils.getSparkStatisticAsLong(sparkStatisticGroup,
+SparkStatisticsNames.TASK_DURATION_TIME) + separator;
+  }
+
+  // Executor CPU Time
+  if 
(sparkStatisticGroup.containsSparkStatistic(SparkStatisticsNames.EXECUTOR_CPU_TIME))
 {
+metricsString += SparkStatisticsNames.EXECUTOR_CPU_TIME + colon +
+SparkMetricsUtils.getSparkStatisticAsLong(sparkStatisticGroup,
+  
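
printConsoleMetrics repeats the same contains-then-append block once per metric, and the message is truncated mid-metric above. The concatenation could equally be expressed with a StringJoiner; the sketch below is an illustrative alternative, not what the patch ships:

    import java.util.Map;
    import java.util.StringJoiner;

    final class ConsoleMetricsFormat {
      // Builds e.g. "Spark Job[3] Metrics: TaskDurationTime: 120, ExecutorCpuTime: 95"
      static String format(int sparkJobId, Map<String, Long> metrics) {
        StringJoiner joined = new StringJoiner(", ",
            String.format("Spark Job[%d] Metrics: ", sparkJobId), "");
        for (Map.Entry<String, Long> e : metrics.entrySet()) {
          joined.add(e.getKey() + ": " + e.getValue());
        }
        return joined.toString();
      }
    }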

[3/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates reviewed by Daniel Dai)

2018-06-04 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/c2cc42c1/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
--
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h 
b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 78656d9..5c6495e 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -291,6 +291,8 @@ class Catalog;
 
 class CreateCatalogRequest;
 
+class AlterCatalogRequest;
+
 class GetCatalogRequest;
 
 class GetCatalogResponse;
@@ -2450,6 +2452,58 @@ inline std::ostream& operator<<(std::ostream& out, const 
CreateCatalogRequest& o
   return out;
 }
 
+typedef struct _AlterCatalogRequest__isset {
+  _AlterCatalogRequest__isset() : name(false), newCat(false) {}
+  bool name :1;
+  bool newCat :1;
+} _AlterCatalogRequest__isset;
+
+class AlterCatalogRequest {
+ public:
+
+  AlterCatalogRequest(const AlterCatalogRequest&);
+  AlterCatalogRequest& operator=(const AlterCatalogRequest&);
+  AlterCatalogRequest() : name() {
+  }
+
+  virtual ~AlterCatalogRequest() throw();
+  std::string name;
+  Catalog newCat;
+
+  _AlterCatalogRequest__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  void __set_newCat(const Catalog& val);
+
+  bool operator == (const AlterCatalogRequest & rhs) const
+  {
+if (!(name == rhs.name))
+  return false;
+if (!(newCat == rhs.newCat))
+  return false;
+return true;
+  }
+  bool operator != (const AlterCatalogRequest &rhs) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const AlterCatalogRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(AlterCatalogRequest &a, AlterCatalogRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const AlterCatalogRequest& 
obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
 typedef struct _GetCatalogRequest__isset {
   _GetCatalogRequest__isset() : name(false) {}
   bool name :1;

http://git-wip-us.apache.org/repos/asf/hive/blob/c2cc42c1/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java
 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java
new file mode 100644
index 000..b9b5117
--- /dev/null
+++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java
@@ -0,0 +1,504 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public class 
AlterCatalogRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, 
Comparable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("AlterCatalogRequest");
+
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("name", 
org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField NEW_CAT_FIELD_DESC = 
new org.apache.thrift.protocol.TField("newCat", 
org.apache.thrift.protocol.TType.STRUCT, (short)2);
+
+  private static final Map, SchemeFactory> schemes = 
new HashMap, SchemeFactory>();
+  static {
+schemes.put(StandardS
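
The generated bean pairs a catalog name (Thrift field 1, a string) with the full replacement Catalog (field 2, a struct), and the service grows a matching alter_catalog call. A hedged sketch of building the request from client code; the fluent setters follow standard Thrift javabean codegen, so treat the exact signatures as assumptions:

    import org.apache.hadoop.hive.metastore.api.AlterCatalogRequest;
    import org.apache.hadoop.hive.metastore.api.Catalog;

    final class AlterCatalogExample {
      // Build a request that replaces the named catalog's definition.
      static AlterCatalogRequest buildRequest(String catName, Catalog replacement) {
        AlterCatalogRequest rqst = new AlterCatalogRequest();
        rqst.setName(catName);       // which catalog to alter (field 1)
        rqst.setNewCat(replacement); // the new definition (field 2)
        return rqst;
      }
    }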

[6/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates reviewed by Daniel Dai)

2018-06-04 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/c2cc42c1/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index ddb175e..e459bc2 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -646,6 +646,233 @@ uint32_t 
ThriftHiveMetastore_create_catalog_presult::read(::apache::thrift::prot
 }
 
 
+ThriftHiveMetastore_alter_catalog_args::~ThriftHiveMetastore_alter_catalog_args()
 throw() {
+}
+
+
+uint32_t 
ThriftHiveMetastore_alter_catalog_args::read(::apache::thrift::protocol::TProtocol*
 iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->rqst.read(iprot);
+  this->__isset.rqst = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t 
ThriftHiveMetastore_alter_catalog_args::write(::apache::thrift::protocol::TProtocol*
 oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_catalog_args");
+
+  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 
1);
+  xfer += this->rqst.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_catalog_pargs::~ThriftHiveMetastore_alter_catalog_pargs()
 throw() {
+}
+
+
+uint32_t 
ThriftHiveMetastore_alter_catalog_pargs::write(::apache::thrift::protocol::TProtocol*
 oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_catalog_pargs");
+
+  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 
1);
+  xfer += (*(this->rqst)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_catalog_result::~ThriftHiveMetastore_alter_catalog_result()
 throw() {
+}
+
+
+uint32_t 
ThriftHiveMetastore_alter_catalog_result::read(::apache::thrift::protocol::TProtocol*
 iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->o1.read(iprot);
+  this->__isset.o1 = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  case 2:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->o2.read(iprot);
+  this->__isset.o2 = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  case 3:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->o3.read(iprot);
+  this->__isset.o3 = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t 
ThriftHiveMetastore_alter_catalog_result::write(::apache::thrift::protocol::TProtocol*
 oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_catalog_result");
+
+  if (this->__isset.o1) {
+xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 
1);
+xfer += this->o1.write(oprot);
+xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 
2);
+ 

[5/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates reviewed by Daniel Dai)

2018-06-04 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/c2cc42c1/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
--
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h 
b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index b7987e3..1d57aee 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -25,6 +25,7 @@ class ThriftHiveMetastoreIf : virtual public  
::facebook::fb303::FacebookService
   virtual void getMetaConf(std::string& _return, const std::string& key) = 0;
   virtual void setMetaConf(const std::string& key, const std::string& value) = 
0;
   virtual void create_catalog(const CreateCatalogRequest& catalog) = 0;
+  virtual void alter_catalog(const AlterCatalogRequest& rqst) = 0;
   virtual void get_catalog(GetCatalogResponse& _return, const 
GetCatalogRequest& catName) = 0;
   virtual void get_catalogs(GetCatalogsResponse& _return) = 0;
   virtual void drop_catalog(const DropCatalogRequest& catName) = 0;
@@ -266,6 +267,9 @@ class ThriftHiveMetastoreNull : virtual public 
ThriftHiveMetastoreIf , virtual p
   void create_catalog(const CreateCatalogRequest& /* catalog */) {
 return;
   }
+  void alter_catalog(const AlterCatalogRequest& /* rqst */) {
+return;
+  }
   void get_catalog(GetCatalogResponse& /* _return */, const GetCatalogRequest& 
/* catName */) {
 return;
   }
@@ -1248,6 +1252,126 @@ class ThriftHiveMetastore_create_catalog_presult {
 
 };
 
+typedef struct _ThriftHiveMetastore_alter_catalog_args__isset {
+  _ThriftHiveMetastore_alter_catalog_args__isset() : rqst(false) {}
+  bool rqst :1;
+} _ThriftHiveMetastore_alter_catalog_args__isset;
+
+class ThriftHiveMetastore_alter_catalog_args {
+ public:
+
+  ThriftHiveMetastore_alter_catalog_args(const 
ThriftHiveMetastore_alter_catalog_args&);
+  ThriftHiveMetastore_alter_catalog_args& operator=(const 
ThriftHiveMetastore_alter_catalog_args&);
+  ThriftHiveMetastore_alter_catalog_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_catalog_args() throw();
+  AlterCatalogRequest rqst;
+
+  _ThriftHiveMetastore_alter_catalog_args__isset __isset;
+
+  void __set_rqst(const AlterCatalogRequest& val);
+
+  bool operator == (const ThriftHiveMetastore_alter_catalog_args & rhs) const
+  {
+if (!(rqst == rhs.rqst))
+  return false;
+return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_catalog_args &rhs) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_catalog_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_alter_catalog_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_catalog_pargs() throw();
+  const AlterCatalogRequest* rqst;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_catalog_result__isset {
+  _ThriftHiveMetastore_alter_catalog_result__isset() : o1(false), o2(false), 
o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_alter_catalog_result__isset;
+
+class ThriftHiveMetastore_alter_catalog_result {
+ public:
+
+  ThriftHiveMetastore_alter_catalog_result(const 
ThriftHiveMetastore_alter_catalog_result&);
+  ThriftHiveMetastore_alter_catalog_result& operator=(const 
ThriftHiveMetastore_alter_catalog_result&);
+  ThriftHiveMetastore_alter_catalog_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_catalog_result() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_alter_catalog_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const InvalidOperationException& val);
+
+  void __set_o3(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_alter_catalog_result & rhs) const
+  {
+if (!(o1 == rhs.o1))
+  return false;
+if (!(o2 == rhs.o2))
+  return false;
+if (!(o3 == rhs.o3))
+  return false;
+return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_catalog_result &rhs) const 
{
+return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_catalog_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_catalog_presult__isset {
+  _ThriftHiveMetastore_alter_catalog_presult__isset() : o1(false), o2(false), 
o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_alter_catalog_presult__isset;
+
+class ThriftHiveMetastore_alter_catalog_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_catalog_presult() 

[4/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates reviewed by Daniel Dai)

2018-06-04 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/c2cc42c1/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp 
b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 8925fe2..bc4d168 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -4653,6 +4653,112 @@ void CreateCatalogRequest::printTo(std::ostream& out) 
const {
 }
 
 
+AlterCatalogRequest::~AlterCatalogRequest() throw() {
+}
+
+
+void AlterCatalogRequest::__set_name(const std::string& val) {
+  this->name = val;
+}
+
+void AlterCatalogRequest::__set_newCat(const Catalog& val) {
+  this->newCat = val;
+}
+
+uint32_t AlterCatalogRequest::read(::apache::thrift::protocol::TProtocol* 
iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRING) {
+  xfer += iprot->readString(this->name);
+  this->__isset.name = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  case 2:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->newCat.read(iprot);
+  this->__isset.newCat = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t AlterCatalogRequest::write(::apache::thrift::protocol::TProtocol* 
oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("AlterCatalogRequest");
+
+  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 
1);
+  xfer += oprot->writeString(this->name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("newCat", 
::apache::thrift::protocol::T_STRUCT, 2);
+  xfer += this->newCat.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(AlterCatalogRequest &a, AlterCatalogRequest &b) {
+  using ::std::swap;
+  swap(a.name, b.name);
+  swap(a.newCat, b.newCat);
+  swap(a.__isset, b.__isset);
+}
+
+AlterCatalogRequest::AlterCatalogRequest(const AlterCatalogRequest& other134) {
+  name = other134.name;
+  newCat = other134.newCat;
+  __isset = other134.__isset;
+}
+AlterCatalogRequest& AlterCatalogRequest::operator=(const AlterCatalogRequest& 
other135) {
+  name = other135.name;
+  newCat = other135.newCat;
+  __isset = other135.__isset;
+  return *this;
+}
+void AlterCatalogRequest::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "AlterCatalogRequest(";
+  out << "name=" << to_string(name);
+  out << ", " << "newCat=" << to_string(newCat);
+  out << ")";
+}
+
+
 GetCatalogRequest::~GetCatalogRequest() throw() {
 }
 
@@ -4722,13 +4828,13 @@ void swap(GetCatalogRequest &a, GetCatalogRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-GetCatalogRequest::GetCatalogRequest(const GetCatalogRequest& other134) {
-  name = other134.name;
-  __isset = other134.__isset;
+GetCatalogRequest::GetCatalogRequest(const GetCatalogRequest& other136) {
+  name = other136.name;
+  __isset = other136.__isset;
 }
-GetCatalogRequest& GetCatalogRequest::operator=(const GetCatalogRequest& 
other135) {
-  name = other135.name;
-  __isset = other135.__isset;
+GetCatalogRequest& GetCatalogRequest::operator=(const GetCatalogRequest& 
other137) {
+  name = other137.name;
+  __isset = other137.__isset;
   return *this;
 }
 void GetCatalogRequest::printTo(std::ostream& out) const {
@@ -4808,13 +4914,13 @@ void swap(GetCatalogResponse &a, GetCatalogResponse &b) 
{
   swap(a.__isset, b.__isset);
 }
 
-GetCatalogResponse::GetCatalogResponse(const GetCatalogResponse& other136) {
-  catalog = other136.catalog;
-  __isset = other136.__isset;
+GetCatalogResponse::GetCatalogResponse(const GetCatalogResponse& other138) {
+  catalog = other138.catalog;
+  __isset = other138.__isset;
 }
-GetCatalogResponse& GetCatalogResponse::operator=(const GetCatalogResponse& 
other137) {
-  catalog = other137.catalog;
-  __isset = other137.__isset;
+GetCatalogResponse& GetCatalogResponse::operator=(const GetCatalogResponse& 
other139) {
+  catalog = othe

[2/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai)

2018-06-04 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/c2cc42c1/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 929f328..672ebf9 100644
--- 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -48,6 +48,8 @@ import org.slf4j.LoggerFactory;
 
 public void create_catalog(CreateCatalogRequest catalog) throws 
AlreadyExistsException, InvalidObjectException, MetaException, 
org.apache.thrift.TException;
 
+public void alter_catalog(AlterCatalogRequest rqst) throws 
NoSuchObjectException, InvalidOperationException, MetaException, 
org.apache.thrift.TException;
+
 public GetCatalogResponse get_catalog(GetCatalogRequest catName) throws 
NoSuchObjectException, MetaException, org.apache.thrift.TException;
 
 public GetCatalogsResponse get_catalogs() throws MetaException, 
org.apache.thrift.TException;
@@ -464,6 +466,8 @@ import org.slf4j.LoggerFactory;
 
 public void create_catalog(CreateCatalogRequest catalog, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
 
+public void alter_catalog(AlterCatalogRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
 public void get_catalog(GetCatalogRequest catName, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
 
 public void get_catalogs(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
@@ -971,6 +975,35 @@ import org.slf4j.LoggerFactory;
   return;
 }
 
+public void alter_catalog(AlterCatalogRequest rqst) throws 
NoSuchObjectException, InvalidOperationException, MetaException, 
org.apache.thrift.TException
+{
+  send_alter_catalog(rqst);
+  recv_alter_catalog();
+}
+
+public void send_alter_catalog(AlterCatalogRequest rqst) throws 
org.apache.thrift.TException
+{
+  alter_catalog_args args = new alter_catalog_args();
+  args.setRqst(rqst);
+  sendBase("alter_catalog", args);
+}
+
+public void recv_alter_catalog() throws NoSuchObjectException, 
InvalidOperationException, MetaException, org.apache.thrift.TException
+{
+  alter_catalog_result result = new alter_catalog_result();
+  receiveBase(result, "alter_catalog");
+  if (result.o1 != null) {
+throw result.o1;
+  }
+  if (result.o2 != null) {
+throw result.o2;
+  }
+  if (result.o3 != null) {
+throw result.o3;
+  }
+  return;
+}
+
 public GetCatalogResponse get_catalog(GetCatalogRequest catName) throws 
NoSuchObjectException, MetaException, org.apache.thrift.TException
 {
   send_get_catalog(catName);
@@ -6922,6 +6955,38 @@ import org.slf4j.LoggerFactory;
   }
 }
 
+public void alter_catalog(AlterCatalogRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException {
+  checkReady();
+  alter_catalog_call method_call = new alter_catalog_call(rqst, 
resultHandler, this, ___protocolFactory, ___transport);
+  this.___currentMethod = method_call;
+  ___manager.call(method_call);
+}
+
+@org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public static class 
alter_catalog_call extends org.apache.thrift.async.TAsyncMethodCall {
+  private AlterCatalogRequest rqst;
+  public alter_catalog_call(AlterCatalogRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler, 
org.apache.thrift.async.TAsyncClient client, 
org.apache.thrift.protocol.TProtocolFactory protocolFactory, 
org.apache.thrift.transport.TNonblockingTransport transport) throws 
org.apache.thrift.TException {
+super(client, protocolFactory, transport, resultHandler, false);
+this.rqst = rqst;
+  }
+
+  public void write_args(org.apache.thrift.protocol.TProtocol prot) throws 
org.apache.thrift.TException {
+prot.writeMessageBegin(new 
org.apache.thrift.protocol.TMessage("alter_catalog", 
org.apache.thrift.protocol.TMessageType.CALL, 0));
+alter_catalog_args args = new alter_catalog_args();
+args.setRqst(rqst);
+args.write(prot);
+prot.writeMessageEnd();
+  }
+
+  public void getResult() throws NoSuchObjectException, 
InvalidOperationException, MetaException, org.apache.thrift.TException {
+if (getState() != 
org
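
For reference, a minimal synchronous usage sketch of the new call, in Java. It assumes an already-connected ThriftHiveMetastore.Client (the class whose send_alter_catalog/recv_alter_catalog pair appears above); the catalog name and location URI are placeholders, not values from the patch.

import org.apache.hadoop.hive.metastore.api.AlterCatalogRequest;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

// Sketch: point catalog "mycat" at a new warehouse root via the new RPC.
void relocateCatalog(ThriftHiveMetastore.Client client) throws Exception {
  Catalog newCat = new Catalog();
  newCat.setName("mycat");                                  // must name an existing catalog
  newCat.setLocationUri("hdfs://nn:8020/warehouse/mycat");  // illustrative new root
  AlterCatalogRequest rqst = new AlterCatalogRequest();
  rqst.setName("mycat");
  rqst.setNewCat(newCat);
  // recv_alter_catalog() re-throws o1/o2/o3, so failures surface as
  // NoSuchObjectException, InvalidOperationException or MetaException.
  client.alter_catalog(rqst);
}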

[7/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai)

2018-06-04 Thread gates
HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c2cc42c1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c2cc42c1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c2cc42c1

Branch: refs/heads/branch-3
Commit: c2cc42c1c8010e08ad8519c4f8164adb028f15d0
Parents: dc609b4
Author: Alan Gates 
Authored: Mon Jun 4 11:30:38 2018 -0700
Committer: Alan Gates 
Committed: Mon Jun 4 11:30:38 2018 -0700

--
 .../org/apache/hive/beeline/HiveSchemaTool.java |   83 +-
 .../hive/beeline/TestSchemaToolCatalogOps.java  |   44 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2733 
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h|  141 +
 .../ThriftHiveMetastore_server.skeleton.cpp |5 +
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp | 6142 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   54 +
 .../hive/metastore/api/AlterCatalogRequest.java |  504 ++
 .../hive/metastore/api/ThriftHiveMetastore.java | 1738 -
 .../gen-php/metastore/ThriftHiveMetastore.php   |  271 +
 .../src/gen/thrift/gen-php/metastore/Types.php  |  103 +
 .../hive_metastore/ThriftHiveMetastore-remote   |7 +
 .../hive_metastore/ThriftHiveMetastore.py   |  231 +
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |   79 +
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   18 +
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   68 +
 .../hadoop/hive/metastore/HiveMetaStore.java|   47 +
 .../hive/metastore/HiveMetaStoreClient.java |5 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   15 +
 .../hive/metastore/MetaStoreEventListener.java  |4 +
 .../metastore/MetaStoreListenerNotifier.java|3 +
 .../metastore/events/AlterCatalogEvent.java |   44 +
 .../metastore/events/PreAlterCatalogEvent.java  |   40 +
 .../hive/metastore/events/PreEventContext.java  |3 +-
 .../messaging/AlterCatalogMessage.java  |   29 +
 .../hive/metastore/messaging/EventMessage.java  |3 +-
 .../metastore/messaging/MessageFactory.java |3 +
 .../messaging/json/JSONAlterCatalogMessage.java |   90 +
 .../messaging/json/JSONMessageFactory.java  |   12 +
 .../src/main/thrift/hive_metastore.thrift   |6 +
 .../HiveMetaStoreClientPreCatalog.java  |6 +
 .../hive/metastore/client/TestCatalogs.java |   47 +
 32 files changed, 8086 insertions(+), 4492 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c2cc42c1/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java 
b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index 856b0ac..314dff8 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -914,6 +914,12 @@ public class HiveSchemaTool {
   throw new HiveMetaException("No catalogs found, have you upgraded 
the database?");
 }
 int catNum = rs.getInt(1) + 1;
+// We need to stay out of the way of any sequences used by the 
underlying database.
+// Otherwise the next time the client tries to add a catalog we'll get 
an error.
+// There should never be billions of catalogs, so we'll shift our 
sequence number up
+// there to avoid clashes.
+int floor = 1 << 30;
+if (catNum < floor) catNum = floor;
 
 String update = "insert into " + quoteIf("CTLGS") +
 "(" + quoteIf("CTLG_ID") + ", " + quoteIf("NAME") + ", " + 
quoteAlways("DESC") + ", " + quoteIf( "LOCATION_URI") + ") " +
@@ -936,6 +942,61 @@ public class HiveSchemaTool {
   }
 
   @VisibleForTesting
+  void alterCatalog(String catName, String location, String description) 
throws HiveMetaException {
+if (location == null && description == null) {
+  throw new HiveMetaException("Asked to update catalog " + catName +
+  " but not given any changes to update");
+}
+catName = normalizeIdentifier(catName);
+System.out.println("Updating catalog " + catName);
+
+Connection conn = getConnectionToMetastore(true);
+boolean success = false;
+try {
+  conn.setAutoCommit(false);
+  try (Statement stmt = conn.createStatement()) {
+StringBuilder update = new StringBuilder("update ")
+.append(quoteIf("CTLGS"))
+.append(" set ");
+if (location != null) {
+  update.append(quoteIf("LOCATION_URI"))
+  .append(" = '")
+  .append(location)
+  .append("' ");
+}
+if (description != null) {
+  if (location != null) update.append(", ");
+  update.a
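
The 1 << 30 floor in the hunk above is easy to sanity-check in isolation. A minimal sketch of the rule, with the method name invented for illustration:

// Tool-created catalog IDs start at 2^30 = 1073741824, far above where a
// database-managed sequence (typically counting up from 1) will reach, so
// later create_catalog calls routed through the metastore don't collide.
static int nextCatalogId(int maxExistingId) {
  int catNum = maxExistingId + 1;
  int floor = 1 << 30;
  return Math.max(catNum, floor);  // same effect as: if (catNum < floor) catNum = floor;
}
// nextCatalogId(3)          -> 1073741824  (first tool-created catalog)
// nextCatalogId(1073741824) -> 1073741825  (subsequent IDs count up normally)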

[1/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai)

2018-06-04 Thread gates
Repository: hive
Updated Branches:
  refs/heads/branch-3 dc609b4a3 -> c2cc42c1c


http://git-wip-us.apache.org/repos/asf/hive/blob/c2cc42c1/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php 
b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 4a37568..ec26cca 100644
--- 
a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ 
b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -40,6 +40,13 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
*/
   public function create_catalog(\metastore\CreateCatalogRequest $catalog);
   /**
+   * @param \metastore\AlterCatalogRequest $rqst
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\InvalidOperationException
+   * @throws \metastore\MetaException
+   */
+  public function alter_catalog(\metastore\AlterCatalogRequest $rqst);
+  /**
* @param \metastore\GetCatalogRequest $catName
* @return \metastore\GetCatalogResponse
* @throws \metastore\NoSuchObjectException
@@ -1721,6 +1728,63 @@ class ThriftHiveMetastoreClient extends 
\FacebookServiceClient implements \metas
 return;
   }
 
+  public function alter_catalog(\metastore\AlterCatalogRequest $rqst)
+  {
+$this->send_alter_catalog($rqst);
+$this->recv_alter_catalog();
+  }
+
+  public function send_alter_catalog(\metastore\AlterCatalogRequest $rqst)
+  {
+$args = new \metastore\ThriftHiveMetastore_alter_catalog_args();
+$args->rqst = $rqst;
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'alter_catalog', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('alter_catalog', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_alter_catalog()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\metastore\ThriftHiveMetastore_alter_catalog_result', 
$this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == TMessageType::EXCEPTION) {
+$x = new TApplicationException();
+$x->read($this->input_);
+$this->input_->readMessageEnd();
+throw $x;
+  }
+  $result = new \metastore\ThriftHiveMetastore_alter_catalog_result();
+  $result->read($this->input_);
+  $this->input_->readMessageEnd();
+}
+if ($result->o1 !== null) {
+  throw $result->o1;
+}
+if ($result->o2 !== null) {
+  throw $result->o2;
+}
+if ($result->o3 !== null) {
+  throw $result->o3;
+}
+return;
+  }
+
   public function get_catalog(\metastore\GetCatalogRequest $catName)
   {
 $this->send_get_catalog($catName);
@@ -13800,6 +13864,213 @@ class ThriftHiveMetastore_create_catalog_result {
 
 }
 
+class ThriftHiveMetastore_alter_catalog_args {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\AlterCatalogRequest
+   */
+  public $rqst = null;
+
+  public function __construct($vals=null) {
+if (!isset(self::$_TSPEC)) {
+  self::$_TSPEC = array(
+1 => array(
+  'var' => 'rqst',
+  'type' => TType::STRUCT,
+  'class' => '\metastore\AlterCatalogRequest',
+  ),
+);
+}
+if (is_array($vals)) {
+  if (isset($vals['rqst'])) {
+$this->rqst = $vals['rqst'];
+  }
+}
+  }
+
+  public function getName() {
+return 'ThriftHiveMetastore_alter_catalog_args';
+  }
+
+  public function read($input)
+  {
+$xfer = 0;
+$fname = null;
+$ftype = 0;
+$fid = 0;
+$xfer += $input->readStructBegin($fname);
+while (true)
+{
+  $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+  if ($ftype == TType::STOP) {
+break;
+  }
+  switch ($fid)
+  {
+case 1:
+  if ($ftype == TType::STRUCT) {
+$this->rqst = new \metastore\AlterCatalogRequest();
+$xfer += $this->rqst->read($input);
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
+default:
+  $xfer += $input->skip($ftype);
+  break;
+  }
+  $xfer += $input->readFieldEnd();
+}
+$xfer += $input->readStructEnd();
+return $xfer;
+  }
+
+  public function write($output) {
+ 

[2/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai).

2018-06-04 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/2028749b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 929f328..672ebf9 100644
--- 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -48,6 +48,8 @@ import org.slf4j.LoggerFactory;
 
 public void create_catalog(CreateCatalogRequest catalog) throws 
AlreadyExistsException, InvalidObjectException, MetaException, 
org.apache.thrift.TException;
 
+public void alter_catalog(AlterCatalogRequest rqst) throws 
NoSuchObjectException, InvalidOperationException, MetaException, 
org.apache.thrift.TException;
+
 public GetCatalogResponse get_catalog(GetCatalogRequest catName) throws 
NoSuchObjectException, MetaException, org.apache.thrift.TException;
 
 public GetCatalogsResponse get_catalogs() throws MetaException, 
org.apache.thrift.TException;
@@ -464,6 +466,8 @@ import org.slf4j.LoggerFactory;
 
 public void create_catalog(CreateCatalogRequest catalog, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
 
+public void alter_catalog(AlterCatalogRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
 public void get_catalog(GetCatalogRequest catName, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
 
 public void get_catalogs(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
@@ -971,6 +975,35 @@ import org.slf4j.LoggerFactory;
   return;
 }
 
+public void alter_catalog(AlterCatalogRequest rqst) throws 
NoSuchObjectException, InvalidOperationException, MetaException, 
org.apache.thrift.TException
+{
+  send_alter_catalog(rqst);
+  recv_alter_catalog();
+}
+
+public void send_alter_catalog(AlterCatalogRequest rqst) throws 
org.apache.thrift.TException
+{
+  alter_catalog_args args = new alter_catalog_args();
+  args.setRqst(rqst);
+  sendBase("alter_catalog", args);
+}
+
+public void recv_alter_catalog() throws NoSuchObjectException, 
InvalidOperationException, MetaException, org.apache.thrift.TException
+{
+  alter_catalog_result result = new alter_catalog_result();
+  receiveBase(result, "alter_catalog");
+  if (result.o1 != null) {
+throw result.o1;
+  }
+  if (result.o2 != null) {
+throw result.o2;
+  }
+  if (result.o3 != null) {
+throw result.o3;
+  }
+  return;
+}
+
 public GetCatalogResponse get_catalog(GetCatalogRequest catName) throws 
NoSuchObjectException, MetaException, org.apache.thrift.TException
 {
   send_get_catalog(catName);
@@ -6922,6 +6955,38 @@ import org.slf4j.LoggerFactory;
   }
 }
 
+public void alter_catalog(AlterCatalogRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException {
+  checkReady();
+  alter_catalog_call method_call = new alter_catalog_call(rqst, 
resultHandler, this, ___protocolFactory, ___transport);
+  this.___currentMethod = method_call;
+  ___manager.call(method_call);
+}
+
+@org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public static class 
alter_catalog_call extends org.apache.thrift.async.TAsyncMethodCall {
+  private AlterCatalogRequest rqst;
+  public alter_catalog_call(AlterCatalogRequest rqst, 
org.apache.thrift.async.AsyncMethodCallback resultHandler, 
org.apache.thrift.async.TAsyncClient client, 
org.apache.thrift.protocol.TProtocolFactory protocolFactory, 
org.apache.thrift.transport.TNonblockingTransport transport) throws 
org.apache.thrift.TException {
+super(client, protocolFactory, transport, resultHandler, false);
+this.rqst = rqst;
+  }
+
+  public void write_args(org.apache.thrift.protocol.TProtocol prot) throws 
org.apache.thrift.TException {
+prot.writeMessageBegin(new 
org.apache.thrift.protocol.TMessage("alter_catalog", 
org.apache.thrift.protocol.TMessageType.CALL, 0));
+alter_catalog_args args = new alter_catalog_args();
+args.setRqst(rqst);
+args.write(prot);
+prot.writeMessageEnd();
+  }
+
+  public void getResult() throws NoSuchObjectException, 
InvalidOperationException, MetaException, org.apache.thrift.TException {
+if (getState() != 
org
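
The asynchronous flavour above is driven through a callback. A hedged Java sketch, assuming an initialized ThriftHiveMetastore.AsyncClient (also generated by this patch):

import org.apache.hadoop.hive.metastore.api.AlterCatalogRequest;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.async.AsyncMethodCallback;

// Sketch: fire alter_catalog without blocking. getResult() re-throws any of
// the three declared exceptions, mirroring the synchronous recv_ path above.
void alterCatalogAsync(ThriftHiveMetastore.AsyncClient client, AlterCatalogRequest rqst)
    throws org.apache.thrift.TException {
  client.alter_catalog(rqst,
      new AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.alter_catalog_call>() {
        @Override
        public void onComplete(ThriftHiveMetastore.AsyncClient.alter_catalog_call call) {
          try {
            call.getResult();  // success if nothing is thrown
          } catch (Exception e) {
            // NoSuchObjectException / InvalidOperationException / MetaException
          }
        }
        @Override
        public void onError(Exception e) {
          // transport-level failure
        }
      });
}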

[6/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai).

2018-06-04 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/2028749b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index ddb175e..e459bc2 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -646,6 +646,233 @@ uint32_t 
ThriftHiveMetastore_create_catalog_presult::read(::apache::thrift::prot
 }
 
 
+ThriftHiveMetastore_alter_catalog_args::~ThriftHiveMetastore_alter_catalog_args()
 throw() {
+}
+
+
+uint32_t 
ThriftHiveMetastore_alter_catalog_args::read(::apache::thrift::protocol::TProtocol*
 iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->rqst.read(iprot);
+  this->__isset.rqst = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t 
ThriftHiveMetastore_alter_catalog_args::write(::apache::thrift::protocol::TProtocol*
 oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_catalog_args");
+
+  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 
1);
+  xfer += this->rqst.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_catalog_pargs::~ThriftHiveMetastore_alter_catalog_pargs()
 throw() {
+}
+
+
+uint32_t 
ThriftHiveMetastore_alter_catalog_pargs::write(::apache::thrift::protocol::TProtocol*
 oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_catalog_pargs");
+
+  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 
1);
+  xfer += (*(this->rqst)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_catalog_result::~ThriftHiveMetastore_alter_catalog_result()
 throw() {
+}
+
+
+uint32_t 
ThriftHiveMetastore_alter_catalog_result::read(::apache::thrift::protocol::TProtocol*
 iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->o1.read(iprot);
+  this->__isset.o1 = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  case 2:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->o2.read(iprot);
+  this->__isset.o2 = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  case 3:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->o3.read(iprot);
+  this->__isset.o3 = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t 
ThriftHiveMetastore_alter_catalog_result::write(::apache::thrift::protocol::TProtocol*
 oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_catalog_result");
+
+  if (this->__isset.o1) {
+xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 
1);
+xfer += this->o1.write(oprot);
+xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 
2);
+ 
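
The else-if chain being written above means at most one of o1/o2/o3 goes on the wire. Server-side, that corresponds to the usual generated processor shape, approximated here in Java since the processor itself is not part of this truncated hunk:

import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

// Hand-written approximation: the handler either succeeds (empty result) or
// exactly one exception slot is populated, which the client's
// recv_alter_catalog() then re-throws.
static ThriftHiveMetastore.alter_catalog_result callHandler(
    ThriftHiveMetastore.Iface handler, ThriftHiveMetastore.alter_catalog_args args)
    throws org.apache.thrift.TException {
  ThriftHiveMetastore.alter_catalog_result result =
      new ThriftHiveMetastore.alter_catalog_result();
  try {
    handler.alter_catalog(args.rqst);
  } catch (NoSuchObjectException o1) {
    result.o1 = o1;
  } catch (InvalidOperationException o2) {
    result.o2 = o2;
  } catch (MetaException o3) {
    result.o3 = o3;
  }
  return result;
}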

[1/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai).

2018-06-04 Thread gates
Repository: hive
Updated Branches:
  refs/heads/master ae2b3933c -> 2028749b1


http://git-wip-us.apache.org/repos/asf/hive/blob/2028749b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php 
b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 4a37568..ec26cca 100644
--- 
a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ 
b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -40,6 +40,13 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
*/
   public function create_catalog(\metastore\CreateCatalogRequest $catalog);
   /**
+   * @param \metastore\AlterCatalogRequest $rqst
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\InvalidOperationException
+   * @throws \metastore\MetaException
+   */
+  public function alter_catalog(\metastore\AlterCatalogRequest $rqst);
+  /**
* @param \metastore\GetCatalogRequest $catName
* @return \metastore\GetCatalogResponse
* @throws \metastore\NoSuchObjectException
@@ -1721,6 +1728,63 @@ class ThriftHiveMetastoreClient extends 
\FacebookServiceClient implements \metas
 return;
   }
 
+  public function alter_catalog(\metastore\AlterCatalogRequest $rqst)
+  {
+$this->send_alter_catalog($rqst);
+$this->recv_alter_catalog();
+  }
+
+  public function send_alter_catalog(\metastore\AlterCatalogRequest $rqst)
+  {
+$args = new \metastore\ThriftHiveMetastore_alter_catalog_args();
+$args->rqst = $rqst;
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'alter_catalog', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('alter_catalog', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_alter_catalog()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\metastore\ThriftHiveMetastore_alter_catalog_result', 
$this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == TMessageType::EXCEPTION) {
+$x = new TApplicationException();
+$x->read($this->input_);
+$this->input_->readMessageEnd();
+throw $x;
+  }
+  $result = new \metastore\ThriftHiveMetastore_alter_catalog_result();
+  $result->read($this->input_);
+  $this->input_->readMessageEnd();
+}
+if ($result->o1 !== null) {
+  throw $result->o1;
+}
+if ($result->o2 !== null) {
+  throw $result->o2;
+}
+if ($result->o3 !== null) {
+  throw $result->o3;
+}
+return;
+  }
+
   public function get_catalog(\metastore\GetCatalogRequest $catName)
   {
 $this->send_get_catalog($catName);
@@ -13800,6 +13864,213 @@ class ThriftHiveMetastore_create_catalog_result {
 
 }
 
+class ThriftHiveMetastore_alter_catalog_args {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\AlterCatalogRequest
+   */
+  public $rqst = null;
+
+  public function __construct($vals=null) {
+if (!isset(self::$_TSPEC)) {
+  self::$_TSPEC = array(
+1 => array(
+  'var' => 'rqst',
+  'type' => TType::STRUCT,
+  'class' => '\metastore\AlterCatalogRequest',
+  ),
+);
+}
+if (is_array($vals)) {
+  if (isset($vals['rqst'])) {
+$this->rqst = $vals['rqst'];
+  }
+}
+  }
+
+  public function getName() {
+return 'ThriftHiveMetastore_alter_catalog_args';
+  }
+
+  public function read($input)
+  {
+$xfer = 0;
+$fname = null;
+$ftype = 0;
+$fid = 0;
+$xfer += $input->readStructBegin($fname);
+while (true)
+{
+  $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+  if ($ftype == TType::STOP) {
+break;
+  }
+  switch ($fid)
+  {
+case 1:
+  if ($ftype == TType::STRUCT) {
+$this->rqst = new \metastore\AlterCatalogRequest();
+$xfer += $this->rqst->read($input);
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
+default:
+  $xfer += $input->skip($ftype);
+  break;
+  }
+  $xfer += $input->readFieldEnd();
+}
+$xfer += $input->readStructEnd();
+return $xfer;
+  }
+
+  public function write($output) {
+   

[4/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai).

2018-06-04 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/2028749b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp 
b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 8925fe2..bc4d168 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -4653,6 +4653,112 @@ void CreateCatalogRequest::printTo(std::ostream& out) 
const {
 }
 
 
+AlterCatalogRequest::~AlterCatalogRequest() throw() {
+}
+
+
+void AlterCatalogRequest::__set_name(const std::string& val) {
+  this->name = val;
+}
+
+void AlterCatalogRequest::__set_newCat(const Catalog& val) {
+  this->newCat = val;
+}
+
+uint32_t AlterCatalogRequest::read(::apache::thrift::protocol::TProtocol* 
iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRING) {
+  xfer += iprot->readString(this->name);
+  this->__isset.name = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  case 2:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->newCat.read(iprot);
+  this->__isset.newCat = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t AlterCatalogRequest::write(::apache::thrift::protocol::TProtocol* 
oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("AlterCatalogRequest");
+
+  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 
1);
+  xfer += oprot->writeString(this->name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("newCat", 
::apache::thrift::protocol::T_STRUCT, 2);
+  xfer += this->newCat.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(AlterCatalogRequest &a, AlterCatalogRequest &b) {
+  using ::std::swap;
+  swap(a.name, b.name);
+  swap(a.newCat, b.newCat);
+  swap(a.__isset, b.__isset);
+}
+
+AlterCatalogRequest::AlterCatalogRequest(const AlterCatalogRequest& other134) {
+  name = other134.name;
+  newCat = other134.newCat;
+  __isset = other134.__isset;
+}
+AlterCatalogRequest& AlterCatalogRequest::operator=(const AlterCatalogRequest& 
other135) {
+  name = other135.name;
+  newCat = other135.newCat;
+  __isset = other135.__isset;
+  return *this;
+}
+void AlterCatalogRequest::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "AlterCatalogRequest(";
+  out << "name=" << to_string(name);
+  out << ", " << "newCat=" << to_string(newCat);
+  out << ")";
+}
+
+
 GetCatalogRequest::~GetCatalogRequest() throw() {
 }
 
@@ -4722,13 +4828,13 @@ void swap(GetCatalogRequest &a, GetCatalogRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-GetCatalogRequest::GetCatalogRequest(const GetCatalogRequest& other134) {
-  name = other134.name;
-  __isset = other134.__isset;
+GetCatalogRequest::GetCatalogRequest(const GetCatalogRequest& other136) {
+  name = other136.name;
+  __isset = other136.__isset;
 }
-GetCatalogRequest& GetCatalogRequest::operator=(const GetCatalogRequest& 
other135) {
-  name = other135.name;
-  __isset = other135.__isset;
+GetCatalogRequest& GetCatalogRequest::operator=(const GetCatalogRequest& 
other137) {
+  name = other137.name;
+  __isset = other137.__isset;
   return *this;
 }
 void GetCatalogRequest::printTo(std::ostream& out) const {
@@ -4808,13 +4914,13 @@ void swap(GetCatalogResponse &a, GetCatalogResponse &b) 
{
   swap(a.__isset, b.__isset);
 }
 
-GetCatalogResponse::GetCatalogResponse(const GetCatalogResponse& other136) {
-  catalog = other136.catalog;
-  __isset = other136.__isset;
+GetCatalogResponse::GetCatalogResponse(const GetCatalogResponse& other138) {
+  catalog = other138.catalog;
+  __isset = other138.__isset;
 }
-GetCatalogResponse& GetCatalogResponse::operator=(const GetCatalogResponse& 
other137) {
-  catalog = other137.catalog;
-  __isset = other137.__isset;
+GetCatalogResponse& GetCatalogResponse::operator=(const GetCatalogResponse& 
other139) {
+  catalog = othe

[7/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai).

2018-06-04 Thread gates
HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai).


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2028749b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2028749b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2028749b

Branch: refs/heads/master
Commit: 2028749b1428d8dcfb2c96bf0ba9fee493a3a573
Parents: ae2b393
Author: Alan Gates 
Authored: Mon Jun 4 11:20:16 2018 -0700
Committer: Alan Gates 
Committed: Mon Jun 4 11:20:16 2018 -0700

--
 .../org/apache/hive/beeline/HiveSchemaTool.java |   83 +-
 .../hive/beeline/TestSchemaToolCatalogOps.java  |   44 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2733 
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h|  141 +
 .../ThriftHiveMetastore_server.skeleton.cpp |5 +
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp | 6142 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   54 +
 .../hive/metastore/api/AlterCatalogRequest.java |  504 ++
 .../hive/metastore/api/ThriftHiveMetastore.java | 1738 -
 .../gen-php/metastore/ThriftHiveMetastore.php   |  271 +
 .../src/gen/thrift/gen-php/metastore/Types.php  |  103 +
 .../hive_metastore/ThriftHiveMetastore-remote   |7 +
 .../hive_metastore/ThriftHiveMetastore.py   |  231 +
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |   79 +
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   18 +
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   68 +
 .../hadoop/hive/metastore/HiveMetaStore.java|   47 +
 .../hive/metastore/HiveMetaStoreClient.java |5 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   15 +
 .../hive/metastore/MetaStoreEventListener.java  |4 +
 .../metastore/MetaStoreListenerNotifier.java|3 +
 .../metastore/events/AlterCatalogEvent.java |   44 +
 .../metastore/events/PreAlterCatalogEvent.java  |   40 +
 .../hive/metastore/events/PreEventContext.java  |3 +-
 .../messaging/AlterCatalogMessage.java  |   29 +
 .../hive/metastore/messaging/EventMessage.java  |3 +-
 .../metastore/messaging/MessageFactory.java |3 +
 .../messaging/json/JSONAlterCatalogMessage.java |   90 +
 .../messaging/json/JSONMessageFactory.java  |   12 +
 .../src/main/thrift/hive_metastore.thrift   |6 +
 .../HiveMetaStoreClientPreCatalog.java  |6 +
 .../hive/metastore/client/TestCatalogs.java |   47 +
 32 files changed, 8086 insertions(+), 4492 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2028749b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java 
b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index 4245fa3..262eaa2 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -921,6 +921,12 @@ public class HiveSchemaTool {
   throw new HiveMetaException("No catalogs found, have you upgraded 
the database?");
 }
 int catNum = rs.getInt(1) + 1;
+// We need to stay out of the way of any sequences used by the 
underlying database.
+// Otherwise the next time the client tries to add a catalog we'll get 
an error.
+// There should never be billions of catalogs, so we'll shift our 
sequence number up
+// there to avoid clashes.
+int floor = 1 << 30;
+if (catNum < floor) catNum = floor;
 
 String update = "insert into " + quoteIf("CTLGS") +
 "(" + quoteIf("CTLG_ID") + ", " + quoteIf("NAME") + ", " + 
quoteAlways("DESC") + ", " + quoteIf( "LOCATION_URI") + ") " +
@@ -943,6 +949,61 @@ public class HiveSchemaTool {
   }
 
   @VisibleForTesting
+  void alterCatalog(String catName, String location, String description) 
throws HiveMetaException {
+if (location == null && description == null) {
+  throw new HiveMetaException("Asked to update catalog " + catName +
+  " but not given any changes to update");
+}
+catName = normalizeIdentifier(catName);
+System.out.println("Updating catalog " + catName);
+
+Connection conn = getConnectionToMetastore(true);
+boolean success = false;
+try {
+  conn.setAutoCommit(false);
+  try (Statement stmt = conn.createStatement()) {
+StringBuilder update = new StringBuilder("update ")
+.append(quoteIf("CTLGS"))
+.append(" set ");
+if (location != null) {
+  update.append(quoteIf("LOCATION_URI"))
+  .append(" = '")
+  .append(location)
+  .append("' ");
+}
+if (description != null) {
+  if (location != null) update.append(", ");
+  update.a
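
The update statement assembled above splices the new values directly into the SQL text, which is reasonable for an operator-driven schema tool. For contrast, a bind-variable sketch of the location-only case; the WHERE clause is an assumption (the diff is truncated before it) and identifier quoting is elided:

import java.sql.Connection;
import java.sql.PreparedStatement;

// Hypothetical equivalent of alterCatalog() when only the location changes,
// using a PreparedStatement instead of string concatenation.
static void updateCatalogLocation(Connection conn, String catName, String location)
    throws Exception {
  String sql = "update CTLGS set LOCATION_URI = ? where NAME = ?";  // WHERE clause assumed
  try (PreparedStatement ps = conn.prepareStatement(sql)) {
    ps.setString(1, location);
    ps.setString(2, catName);
    if (ps.executeUpdate() != 1) {
      throw new IllegalStateException("no catalog named " + catName);
    }
  }
}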

[3/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai).

2018-06-04 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/2028749b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
--
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h 
b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 78656d9..5c6495e 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -291,6 +291,8 @@ class Catalog;
 
 class CreateCatalogRequest;
 
+class AlterCatalogRequest;
+
 class GetCatalogRequest;
 
 class GetCatalogResponse;
@@ -2450,6 +2452,58 @@ inline std::ostream& operator<<(std::ostream& out, const 
CreateCatalogRequest& o
   return out;
 }
 
+typedef struct _AlterCatalogRequest__isset {
+  _AlterCatalogRequest__isset() : name(false), newCat(false) {}
+  bool name :1;
+  bool newCat :1;
+} _AlterCatalogRequest__isset;
+
+class AlterCatalogRequest {
+ public:
+
+  AlterCatalogRequest(const AlterCatalogRequest&);
+  AlterCatalogRequest& operator=(const AlterCatalogRequest&);
+  AlterCatalogRequest() : name() {
+  }
+
+  virtual ~AlterCatalogRequest() throw();
+  std::string name;
+  Catalog newCat;
+
+  _AlterCatalogRequest__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  void __set_newCat(const Catalog& val);
+
+  bool operator == (const AlterCatalogRequest & rhs) const
+  {
+if (!(name == rhs.name))
+  return false;
+if (!(newCat == rhs.newCat))
+  return false;
+return true;
+  }
+  bool operator != (const AlterCatalogRequest &rhs) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const AlterCatalogRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(AlterCatalogRequest &a, AlterCatalogRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const AlterCatalogRequest& 
obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
 typedef struct _GetCatalogRequest__isset {
   _GetCatalogRequest__isset() : name(false) {}
   bool name :1;

http://git-wip-us.apache.org/repos/asf/hive/blob/2028749b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java
--
diff --git 
a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java
 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java
new file mode 100644
index 000..b9b5117
--- /dev/null
+++ 
b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java
@@ -0,0 +1,504 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public class 
AlterCatalogRequest implements org.apache.thrift.TBase<AlterCatalogRequest, AlterCatalogRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AlterCatalogRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("AlterCatalogRequest");
+
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("name", 
org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField NEW_CAT_FIELD_DESC = 
new org.apache.thrift.protocol.TField("newCat", 
org.apache.thrift.protocol.TType.STRUCT, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+schemes.put(StandardS

[5/7] hive git commit: HIVE-19688 Make catalogs updatable (Alan Gates, reviewed by Daniel Dai).

2018-06-04 Thread gates
http://git-wip-us.apache.org/repos/asf/hive/blob/2028749b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
--
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h 
b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index b7987e3..1d57aee 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -25,6 +25,7 @@ class ThriftHiveMetastoreIf : virtual public  
::facebook::fb303::FacebookService
   virtual void getMetaConf(std::string& _return, const std::string& key) = 0;
   virtual void setMetaConf(const std::string& key, const std::string& value) = 
0;
   virtual void create_catalog(const CreateCatalogRequest& catalog) = 0;
+  virtual void alter_catalog(const AlterCatalogRequest& rqst) = 0;
   virtual void get_catalog(GetCatalogResponse& _return, const 
GetCatalogRequest& catName) = 0;
   virtual void get_catalogs(GetCatalogsResponse& _return) = 0;
   virtual void drop_catalog(const DropCatalogRequest& catName) = 0;
@@ -266,6 +267,9 @@ class ThriftHiveMetastoreNull : virtual public 
ThriftHiveMetastoreIf , virtual p
   void create_catalog(const CreateCatalogRequest& /* catalog */) {
 return;
   }
+  void alter_catalog(const AlterCatalogRequest& /* rqst */) {
+return;
+  }
   void get_catalog(GetCatalogResponse& /* _return */, const GetCatalogRequest& 
/* catName */) {
 return;
   }
@@ -1248,6 +1252,126 @@ class ThriftHiveMetastore_create_catalog_presult {
 
 };
 
+typedef struct _ThriftHiveMetastore_alter_catalog_args__isset {
+  _ThriftHiveMetastore_alter_catalog_args__isset() : rqst(false) {}
+  bool rqst :1;
+} _ThriftHiveMetastore_alter_catalog_args__isset;
+
+class ThriftHiveMetastore_alter_catalog_args {
+ public:
+
+  ThriftHiveMetastore_alter_catalog_args(const 
ThriftHiveMetastore_alter_catalog_args&);
+  ThriftHiveMetastore_alter_catalog_args& operator=(const 
ThriftHiveMetastore_alter_catalog_args&);
+  ThriftHiveMetastore_alter_catalog_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_catalog_args() throw();
+  AlterCatalogRequest rqst;
+
+  _ThriftHiveMetastore_alter_catalog_args__isset __isset;
+
+  void __set_rqst(const AlterCatalogRequest& val);
+
+  bool operator == (const ThriftHiveMetastore_alter_catalog_args & rhs) const
+  {
+if (!(rqst == rhs.rqst))
+  return false;
+return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_catalog_args &rhs) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_catalog_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_alter_catalog_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_catalog_pargs() throw();
+  const AlterCatalogRequest* rqst;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_catalog_result__isset {
+  _ThriftHiveMetastore_alter_catalog_result__isset() : o1(false), o2(false), 
o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_alter_catalog_result__isset;
+
+class ThriftHiveMetastore_alter_catalog_result {
+ public:
+
+  ThriftHiveMetastore_alter_catalog_result(const 
ThriftHiveMetastore_alter_catalog_result&);
+  ThriftHiveMetastore_alter_catalog_result& operator=(const 
ThriftHiveMetastore_alter_catalog_result&);
+  ThriftHiveMetastore_alter_catalog_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_catalog_result() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_alter_catalog_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const InvalidOperationException& val);
+
+  void __set_o3(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_alter_catalog_result & rhs) const
+  {
+if (!(o1 == rhs.o1))
+  return false;
+if (!(o2 == rhs.o2))
+  return false;
+if (!(o3 == rhs.o3))
+  return false;
+return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_catalog_result &rhs) const 
{
+return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_catalog_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_catalog_presult__isset {
+  _ThriftHiveMetastore_alter_catalog_presult__isset() : o1(false), o2(false), 
o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_alter_catalog_presult__isset;
+
+class ThriftHiveMetastore_alter_catalog_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_catalog_presult() 
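
The one-bit __isset flags that recur throughout these generated headers record which fields were actually present on the wire, distinguishing "never set" from "set to its default". A tiny hand-rolled Java analogue, names invented purely for illustration:

// Mirrors the role of the generated __isset struct for a single field.
final class RequestWithIsset {
  private String name;
  private boolean nameIsSet;  // plays the part of "bool name :1" above

  void setName(String n) { name = n; nameIsSet = true; }
  boolean isSetName()    { return nameIsSet; }
  String getName()       { return name; }
}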

hive git commit: HIVE-19700: Workaround for JLine issue with UnsupportedTerminal (Naveen Gangam, reviewed by Yongzhi Chen)

2018-06-04 Thread ngangam
Repository: hive
Updated Branches:
  refs/heads/branch-2 14c7c0c5c -> 7362ea6d4


HIVE-19700: Workaround for JLine issue with UnsupportedTerminal (Naveen Gangam, 
reviewed by Yongzhi Chen)

Conflicts:
beeline/src/java/org/apache/hive/beeline/Commands.java


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7362ea6d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7362ea6d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7362ea6d

Branch: refs/heads/branch-2
Commit: 7362ea6d4a69af51e216a3420bd149408cac2743
Parents: 14c7c0c
Author: Naveen Gangam 
Authored: Mon Jun 4 10:25:07 2018 -0400
Committer: Naveen Gangam 
Committed: Mon Jun 4 11:39:23 2018 -0400

--
 beeline/src/java/org/apache/hive/beeline/BeeLine.java  | 5 -
 beeline/src/java/org/apache/hive/beeline/Commands.java | 5 -
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/7362ea6d/beeline/src/java/org/apache/hive/beeline/BeeLine.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java 
b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index f8c0dd3..118d0eb 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -1176,12 +1176,15 @@ public class BeeLine implements Closeable {
 
   private int execute(ConsoleReader reader, boolean exitOnError) {
 int lastExecutionResult = ERRNO_OK;
+Character mask = (System.getProperty("jline.terminal", 
"").equals("jline.UnsupportedTerminal")) ? null
+   : ConsoleReader.NULL_MASK;
+
 while (!exit) {
   try {
 // Execute one instruction; terminate on executing a script if there 
is an error
 // in silent mode, prevent the query and prompt being echoed back to 
terminal
 String line = (getOpts().isSilent() && getOpts().getScriptFile() != 
null) ? reader
-.readLine(null, ConsoleReader.NULL_MASK) : 
reader.readLine(getPrompt());
+.readLine(null, mask) : reader.readLine(getPrompt());
 
 // trim line
 if (line != null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/7362ea6d/beeline/src/java/org/apache/hive/beeline/Commands.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java 
b/beeline/src/java/org/apache/hive/beeline/Commands.java
index 2578728..35b3b3b 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -1081,6 +1081,9 @@ public class Commands {
 //When using -e, console reader is not initialized and command is always a 
single line
 int[] startQuote = {-1};
 line = removeComments(line,startQuote);
+Character mask = (System.getProperty("jline.terminal", 
"").equals("jline.UnsupportedTerminal")) ? null
+   : jline.console.ConsoleReader.NULL_MASK;
+
 while (isMultiLine(line) && beeLine.getOpts().isAllowMultiLineCommand()) {
   StringBuilder prompt = new StringBuilder(beeLine.getPrompt());
   if (!beeLine.getOpts().isSilent()) {
@@ -1097,7 +1100,7 @@ public class Commands {
 + "is a multi-line command using -e option and which requires 
further reading from console");
   }
   if (beeLine.getOpts().isSilent() && beeLine.getOpts().getScriptFile() != 
null) {
-extra = beeLine.getConsoleReader().readLine(null, 
jline.console.ConsoleReader.NULL_MASK);
+extra = beeLine.getConsoleReader().readLine(null, mask);
   } else {
 extra = beeLine.getConsoleReader().readLine(prompt.toString());
   }
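
The core decision of the workaround, pulled out for clarity: JLine's NULL_MASK (which echoes nothing) is passed only when a real terminal is configured; under jline.UnsupportedTerminal a null mask is used instead, so readLine() falls back to a plain, unmasked read. A minimal helper sketch with an invented method name:

import jline.console.ConsoleReader;

// Choose the readLine() mask based on the configured JLine terminal.
static Character readLineMask() {
  String term = System.getProperty("jline.terminal", "");
  return "jline.UnsupportedTerminal".equals(term) ? null : ConsoleReader.NULL_MASK;
}

// Usage, matching the patched call sites:
//   String line = reader.readLine(null, readLineMask());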



hive git commit: HIVE-19700: Workaround for JLine issue with UnsupportedTerminal (Naveen Gangam, reviewed by Yongzhi Chen)

2018-06-04 Thread ngangam
Repository: hive
Updated Branches:
  refs/heads/branch-3 b6df4c682 -> dc609b4a3


HIVE-19700: Workaround for JLine issue with UnsupportedTerminal (Naveen Gangam, 
reviewed by Yongzhi Chen)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dc609b4a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dc609b4a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dc609b4a

Branch: refs/heads/branch-3
Commit: dc609b4a3669ea0b59baea4226722837eedc5fd6
Parents: b6df4c6
Author: Naveen Gangam 
Authored: Mon Jun 4 10:25:07 2018 -0400
Committer: Naveen Gangam 
Committed: Mon Jun 4 10:47:15 2018 -0400

--
 beeline/src/java/org/apache/hive/beeline/BeeLine.java  | 5 -
 beeline/src/java/org/apache/hive/beeline/Commands.java | 5 -
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/dc609b4a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java 
b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index f656fae..0d98679 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -1252,12 +1252,15 @@ public class BeeLine implements Closeable {
 
   private int execute(ConsoleReader reader, boolean exitOnError) {
 int lastExecutionResult = ERRNO_OK;
+Character mask = (System.getProperty("jline.terminal", 
"").equals("jline.UnsupportedTerminal")) ? null
+   : ConsoleReader.NULL_MASK;
+
 while (!exit) {
   try {
 // Execute one instruction; terminate on executing a script if there is an error
 // in silent mode, prevent the query and prompt being echoed back to terminal
 String line = (getOpts().isSilent() && getOpts().getScriptFile() != 
null) ? reader
-.readLine(null, ConsoleReader.NULL_MASK) : 
reader.readLine(getPrompt());
+.readLine(null, mask) : reader.readLine(getPrompt());
 
 // trim line
 if (line != null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/dc609b4a/beeline/src/java/org/apache/hive/beeline/Commands.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java b/beeline/src/java/org/apache/hive/beeline/Commands.java
index e46c0cf..851042f 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -1074,6 +1074,9 @@ public class Commands {
   public String handleMultiLineCmd(String line) throws IOException {
 int[] startQuote = {-1};
 line = HiveStringUtils.removeComments(line, startQuote);
+Character mask = (System.getProperty("jline.terminal", 
"").equals("jline.UnsupportedTerminal")) ? null
+   : jline.console.ConsoleReader.NULL_MASK;
+
 while (isMultiLine(line) && beeLine.getOpts().isAllowMultiLineCommand()) {
   StringBuilder prompt = new StringBuilder(beeLine.getPrompt());
   if (!beeLine.getOpts().isSilent()) {
@@ -1090,7 +1093,7 @@ public class Commands {
 + "is a multi-line command using -e option and which requires 
further reading from console");
   }
   if (beeLine.getOpts().isSilent() && beeLine.getOpts().getScriptFile() != 
null) {
-extra = beeLine.getConsoleReader().readLine(null, 
jline.console.ConsoleReader.NULL_MASK);
+extra = beeLine.getConsoleReader().readLine(null, mask);
   } else {
 extra = beeLine.getConsoleReader().readLine(prompt.toString());
   }
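
A quick sanity check of the selection logic is to flip the system property around calls to the MaskSelection sketch shown earlier; the expected values are in the comments (illustrative only, not part of the commit):

    public class MaskSelectionCheck {
      public static void main(String[] args) {
        System.setProperty("jline.terminal", "jline.UnsupportedTerminal");
        System.out.println("unsupported -> " + MaskSelection.chooseMask()); // expect: null

        System.clearProperty("jline.terminal");
        // expect: NULL_MASK, a NUL character, so nothing visible is printed
        System.out.println("default     -> " + MaskSelection.chooseMask());
      }
    }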



hive git commit: HIVE-19700: Workaround for JLine issue with UnsupportedTerminal (Naveen Gangam, reviewed by Yongzhi Chen)

2018-06-04 Thread ngangam
Repository: hive
Updated Branches:
  refs/heads/master 439a6e605 -> ae2b3933c


HIVE-19700: Workaround for JLine issue with UnsupportedTerminal (Naveen Gangam, reviewed by Yongzhi Chen)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ae2b3933
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ae2b3933
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ae2b3933

Branch: refs/heads/master
Commit: ae2b3933c6ec81b54fd5e41877ebbf4aba470b34
Parents: 439a6e6
Author: Naveen Gangam 
Authored: Mon Jun 4 10:25:07 2018 -0400
Committer: Naveen Gangam 
Committed: Mon Jun 4 10:26:11 2018 -0400

--
 beeline/src/java/org/apache/hive/beeline/BeeLine.java  | 5 ++++-
 beeline/src/java/org/apache/hive/beeline/Commands.java | 5 ++++-
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ae2b3933/beeline/src/java/org/apache/hive/beeline/BeeLine.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 4ae2e3f..860d36e 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -1256,12 +1256,15 @@ public class BeeLine implements Closeable {
 
   private int execute(ConsoleReader reader, boolean exitOnError) {
 int lastExecutionResult = ERRNO_OK;
+Character mask = (System.getProperty("jline.terminal", 
"").equals("jline.UnsupportedTerminal")) ? null
+   : ConsoleReader.NULL_MASK;
+
 while (!exit) {
   try {
 // Execute one instruction; terminate on executing a script if there is an error
 // in silent mode, prevent the query and prompt being echoed back to terminal
 String line = (getOpts().isSilent() && getOpts().getScriptFile() != 
null) ? reader
-.readLine(null, ConsoleReader.NULL_MASK) : 
reader.readLine(getPrompt());
+.readLine(null, mask) : reader.readLine(getPrompt());
 
 // trim line
 if (line != null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ae2b3933/beeline/src/java/org/apache/hive/beeline/Commands.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java b/beeline/src/java/org/apache/hive/beeline/Commands.java
index 4d654d4..f4dd586 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -1074,6 +1074,9 @@ public class Commands {
   public String handleMultiLineCmd(String line) throws IOException {
 int[] startQuote = {-1};
 line = HiveStringUtils.removeComments(line, startQuote);
+Character mask = (System.getProperty("jline.terminal", 
"").equals("jline.UnsupportedTerminal")) ? null
+   : jline.console.ConsoleReader.NULL_MASK;
+
 while (isMultiLine(line) && beeLine.getOpts().isAllowMultiLineCommand()) {
   StringBuilder prompt = new StringBuilder(beeLine.getPrompt());
   if (!beeLine.getOpts().isSilent()) {
@@ -1090,7 +1093,7 @@ public class Commands {
 + "is a multi-line command using -e option and which requires 
further reading from console");
   }
   if (beeLine.getOpts().isSilent() && beeLine.getOpts().getScriptFile() != 
null) {
-extra = beeLine.getConsoleReader().readLine(null, 
jline.console.ConsoleReader.NULL_MASK);
+extra = beeLine.getConsoleReader().readLine(null, mask);
   } else {
 extra = beeLine.getConsoleReader().readLine(prompt.toString());
   }
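
With this change on both branch-3 and master, the same ternary now appears in BeeLine.execute() and in Commands.handleMultiLineCmd(). A shared helper would keep the two call sites from drifting apart; a hedged refactoring sketch (TerminalMask is hypothetical, not in either commit):

    final class TerminalMask {
      private TerminalMask() {}

      // Single home for the workaround so both call sites stay in sync.
      static Character forCurrentTerminal() {
        return "jline.UnsupportedTerminal".equals(System.getProperty("jline.terminal", ""))
            ? null
            : jline.console.ConsoleReader.NULL_MASK;
      }
    }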



hive git commit: HIVE-19730: fix TestTablesGetExists's flakiness (Peter Vary, reviewed by Jesus Camacho Rodriguez)

2018-06-04 Thread pvary
Repository: hive
Updated Branches:
  refs/heads/master a95ec1e14 -> 439a6e605


HIVE-19730: fix TestTablesGetExists's flakiness (Peter Vary, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/439a6e60
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/439a6e60
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/439a6e60

Branch: refs/heads/master
Commit: 439a6e605d078cb167e9626374f7163d7a5caa26
Parents: a95ec1e
Author: Peter Vary 
Authored: Mon Jun 4 12:27:00 2018 +0200
Committer: Peter Vary 
Committed: Mon Jun 4 12:27:00 2018 +0200

--
 .../hadoop/hive/metastore/client/TestAddPartitions.java | 4 +++-
 .../metastore/client/TestAddPartitionsFromPartSpec.java     | 6 +++++-
 .../hadoop/hive/metastore/client/TestAlterPartitions.java   | 6 +++++-
 .../hadoop/hive/metastore/client/TestAppendPartitions.java  | 6 +++++-
 .../apache/hadoop/hive/metastore/client/TestCatalogs.java   | 7 +++++--
 .../hadoop/hive/metastore/client/TestCheckConstraint.java   | 7 +++++--
 .../apache/hadoop/hive/metastore/client/TestDatabases.java  | 6 +++++-
 .../hadoop/hive/metastore/client/TestDefaultConstraint.java | 7 +++++--
 .../hadoop/hive/metastore/client/TestDropPartitions.java    | 6 +++++-
 .../hive/metastore/client/TestExchangePartitions.java       | 6 +++++-
 .../apache/hadoop/hive/metastore/client/TestForeignKey.java | 7 +++++--
 .../apache/hadoop/hive/metastore/client/TestFunctions.java  | 6 +++++-
 .../hadoop/hive/metastore/client/TestGetPartitions.java     | 6 +++++-
 .../hadoop/hive/metastore/client/TestGetTableMeta.java      | 6 +++++-
 .../hadoop/hive/metastore/client/TestListPartitions.java    | 6 +++++-
 .../hadoop/hive/metastore/client/TestNotNullConstraint.java | 7 +++++--
 .../apache/hadoop/hive/metastore/client/TestPrimaryKey.java | 7 +++++--
 .../hadoop/hive/metastore/client/TestRuntimeStats.java      | 9 +++++----
 .../metastore/client/TestTablesCreateDropAlterTruncate.java | 6 +++++-
 .../hadoop/hive/metastore/client/TestTablesGetExists.java   | 6 +++++-
 .../apache/hadoop/hive/metastore/client/TestTablesList.java | 6 +++++-
 .../hadoop/hive/metastore/client/TestUniqueConstraint.java  | 7 +++++--
 22 files changed, 108 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/439a6e60/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java
--
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java
index 88064d9..bf559b4 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java
@@ -95,8 +95,10 @@ public class TestAddPartitions extends MetaStoreClientTest {
   @After
   public void tearDown() throws Exception {
 try {
-  if (client != null) {
+  try {
 client.close();
+  } catch (Exception e) {
+// HIVE-19729: Swallow the exceptions based on the discussion in the Jira
   }
 } finally {
   client = null;
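
The same tearDown hardening is applied to all 22 test classes in the diffstat: exceptions from close() are swallowed so a shutdown-time race cannot fail an otherwise green test, and the client reference is always cleared. The pattern in isolation, following the variant that keeps the null check; the base-class name is illustrative, while IMetaStoreClient is the client interface these tests hold:

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;

    abstract class ClientTearDownSketch {
      protected IMetaStoreClient client;

      public void tearDown() throws Exception {
        try {
          if (client != null) {
            try {
              client.close();
            } catch (Exception e) {
              // HIVE-19729: ignore close() failures; they race with metastore shutdown
            }
          }
        } finally {
          client = null; // always drop the reference, even if close() threw
        }
      }
    }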

http://git-wip-us.apache.org/repos/asf/hive/blob/439a6e60/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java
--
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java
index debcd0e..4f11a55 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java
@@ -95,7 +95,11 @@ public class TestAddPartitionsFromPartSpec extends MetaStoreClientTest {
   public void tearDown() throws Exception {
 try {
   if (client != null) {
-client.close();
+try {
+  client.close();
+} catch (Exception e) {
+  // HIVE-19729: Swallow the exceptions based on the discussion in the Jira
+}
   }
 } finally {
   client = null;

http://git-wip-us.apache.org/repos/asf/hive/blob/439a6e60/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
--
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartit