hive git commit: HIVE-13396: LLAP: Include hadoop-metrics2.properties file in LlapServiceDriver (Prasanth Jayachandran reviewed by Sergey Shelukhin)

2016-04-04 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/master 91ab819a1 -> f3358b036


HIVE-13396: LLAP: Include hadoop-metrics2.properties file in LlapServiceDriver 
(Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f3358b03
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f3358b03
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f3358b03

Branch: refs/heads/master
Commit: f3358b036f1d14d369711e8863b904d04ef75a04
Parents: 91ab819
Author: Prasanth Jayachandran 
Authored: Mon Apr 4 19:15:07 2016 -0500
Committer: Prasanth Jayachandran 
Committed: Mon Apr 4 19:15:07 2016 -0500

--
 .../hadoop/hive/llap/cli/LlapServiceDriver.java |  8 
 .../hive/llap/daemon/impl/LlapDaemon.java   |  1 +
 .../llap/metrics/LlapDaemonCacheMetrics.java|  4 +-
 .../llap/metrics/LlapDaemonExecutorMetrics.java |  4 +-
 .../llap/metrics/LlapDaemonQueueMetrics.java|  4 +-
 .../hadoop/hive/llap/metrics/MetricsUtils.java  |  1 -
 .../hadoop-metrics2.properties.template | 50 
 7 files changed, 65 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f3358b03/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
index 1f3b930..8cd6df7 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
@@ -399,6 +399,14 @@ public class LlapServiceDriver {
     IOUtils.copyBytes(loggerContent,
         lfs.create(new Path(confPath, "llap-daemon-log4j2.properties"), true), conf, true);
 
+    URL metrics2 = conf.getResource(LlapDaemon.HADOOP_METRICS2_PROPERTIES_FILE);
+    if (metrics2 != null) {
+      InputStream metrics2FileStream = metrics2.openStream();
+      IOUtils.copyBytes(metrics2FileStream,
+          lfs.create(new Path(confPath, LlapDaemon.HADOOP_METRICS2_PROPERTIES_FILE), true),
+          conf, true);
+    }
+
     PrintWriter udfStream =
         new PrintWriter(lfs.create(new Path(confPath, StaticPermanentFunctionChecker.PERMANENT_FUNCTIONS_LIST)));
     for (String udfClass : allowedUdfs) {

http://git-wip-us.apache.org/repos/asf/hive/blob/f3358b03/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java 
b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index 2fe59a2..8600832 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -73,6 +73,7 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
   private static final Logger LOG = LoggerFactory.getLogger(LlapDaemon.class);
 
   public static final String LOG4j2_PROPERTIES_FILE = "llap-daemon-log4j2.properties";
+  public static final String HADOOP_METRICS2_PROPERTIES_FILE = "hadoop-metrics2.properties";
   private final Configuration shuffleHandlerConf;
   private final LlapProtocolServerImpl server;
   private final ContainerRunnerImpl containerRunner;
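
For context, the file copied above is the standard configuration source for Hadoop's metrics2 framework, which the LLAP metrics sources touched by this patch (cache, executor, queue) publish through. The following is a minimal sketch of how such a file gets picked up at daemon start, using only stock hadoop-common APIs with an illustrative prefix; none of this code is part of the commit.

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class MetricsBootstrapSketch {
  public static void main(String[] args) {
    // initialize() loads hadoop-metrics2.properties (and, if present,
    // hadoop-metrics2-llapdaemon.properties) from the classpath; that file decides
    // which sinks receive metrics from sources registered afterwards.
    MetricsSystem ms = DefaultMetricsSystem.initialize("llapdaemon");
    System.out.println("Metrics system initialized: " + ms);
  }
}

Shipping the properties file with the generated LLAP configuration means the daemon-side metrics sources can be routed to real sinks instead of silently falling back to defaults.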

http://git-wip-us.apache.org/repos/asf/hive/blob/f3358b03/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheMetrics.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheMetrics.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheMetrics.java
index 52057e4..b89c6c4 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheMetrics.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonCacheMetrics.java
@@ -45,7 +45,7 @@ import com.google.common.annotations.VisibleForTesting;
 /**
  * Llap daemon cache metrics source.
  */
-@Metrics(about = "LlapDaemon Cache Metrics", context = MetricsUtils.METRICS_CONTEXT)
+@Metrics(about = "LlapDaemon Cache Metrics", context = "cache")
 public class LlapDaemonCacheMetrics implements MetricsSource {
   final String name;
   private String sessionId;
@@ -127,7 +127,7 @@ public class LlapDaemonCacheMetrics implements MetricsSource {
   @Override
   public void getMetrics(MetricsCollector collector, boolean b) {
     MetricsRecordBuilder rb = collector.addRecord(CacheMetrics)
-

hive git commit: HIVE-13330: ORC vectorized string dictionary reader does not differentiate null vs empty string dictionary (Prasanth Jayachandran reviewed by Matt McCline)

2016-04-04 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/master f3358b036 -> b04665948


HIVE-13330: ORC vectorized string dictionary reader does not differentiate null 
vs empty string dictionary (Prasanth Jayachandran reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b0466594
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b0466594
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b0466594

Branch: refs/heads/master
Commit: b04665948acbd6b1a793a287987984f4dfb19631
Parents: f3358b0
Author: Prasanth Jayachandran 
Authored: Mon Apr 4 19:33:01 2016 -0500
Committer: Prasanth Jayachandran 
Committed: Mon Apr 4 19:33:01 2016 -0500

--
 .../hive/ql/io/orc/TreeReaderFactory.java   | 20 +--
 .../vector_orc_string_reader_empty_dict.q   | 20 +++
 .../vector_orc_string_reader_empty_dict.q.out   | 62 
 3 files changed, 97 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b0466594/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
index d74a854..8bb32ea 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
@@ -1683,6 +1683,7 @@ public class TreeReaderFactory {
    * stripe.
    */
   public static class StringDictionaryTreeReader extends TreeReader {
+    private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
     private DynamicByteArray dictionaryBuffer;
     private int[] dictionaryOffsets;
     protected IntegerReader reader;
@@ -1862,11 +1863,20 @@
         }
         result.isRepeating = scratchlcv.isRepeating;
       } else {
-        // Entire stripe contains null strings.
-        result.isRepeating = true;
-        result.noNulls = false;
-        result.isNull[0] = true;
-        result.setRef(0, "".getBytes(), 0, 0);
+        if (dictionaryOffsets == null) {
+          // Entire stripe contains null strings.
+          result.isRepeating = true;
+          result.noNulls = false;
+          result.isNull[0] = true;
+          result.setRef(0, EMPTY_BYTE_ARRAY, 0, 0);
+        } else {
+          // stripe contains nulls and empty strings
+          for (int i = 0; i < batchSize; i++) {
+            if (!result.isNull[i]) {
+              result.setRef(i, EMPTY_BYTE_ARRAY, 0, 0);
+            }
+          }
+        }
       }
       return result;
     }
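
The distinction the patch draws is between a stripe whose dictionary is absent (every value is NULL) and a stripe whose dictionary exists but holds only empty strings. Below is a minimal, illustrative sketch of that distinction on a BytesColumnVector, assuming hive-exec on the classpath; the batch size and row choices are invented for the example and are not taken from the patch.

import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;

public class NullVsEmptyStringSketch {
  private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];

  public static void main(String[] args) {
    int batchSize = 3;

    // Case 1: no dictionary at all -> the whole stripe is NULL.
    BytesColumnVector allNulls = new BytesColumnVector(batchSize);
    allNulls.isRepeating = true;
    allNulls.noNulls = false;
    allNulls.isNull[0] = true;

    // Case 2: dictionary present but containing only empty strings -> non-null rows
    // must come back as "" rather than NULL; reusing one shared zero-length array
    // avoids allocating "".getBytes() for every row.
    BytesColumnVector mixed = new BytesColumnVector(batchSize);
    mixed.noNulls = false;
    mixed.isNull[1] = true;                        // row 1 is genuinely NULL
    for (int i = 0; i < batchSize; i++) {
      if (!mixed.isNull[i]) {
        mixed.setRef(i, EMPTY_BYTE_ARRAY, 0, 0);   // rows 0 and 2 become ""
      }
    }
  }
}

Before the fix, both cases collapsed into the first branch, so empty strings read back as NULLs under the vectorized reader.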

http://git-wip-us.apache.org/repos/asf/hive/blob/b0466594/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q
--
diff --git 
a/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q 
b/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q
new file mode 100644
index 000..0e8a743
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q
@@ -0,0 +1,20 @@
+create table orcstr (vcol varchar(20)) stored as orc;
+
+insert overwrite table orcstr select null from src;
+
+SET hive.fetch.task.conversion=none;
+
+SET hive.vectorized.execution.enabled=false;
+select vcol from orcstr limit 1;
+
+SET hive.vectorized.execution.enabled=true;
+select vcol from orcstr limit 1;
+
+insert overwrite table orcstr select "" from src;
+
+SET hive.vectorized.execution.enabled=false;
+select vcol from orcstr limit 1;
+
+SET hive.vectorized.execution.enabled=true;
+select vcol from orcstr limit 1;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/b0466594/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out 
b/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out
new file mode 100644
index 000..4f00bed
--- /dev/null
+++ 
b/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out
@@ -0,0 +1,62 @@
+PREHOOK: query: create table orcstr (vcol varchar(20)) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcstr
+POSTHOOK: query: create table orcstr (vcol varchar(20)) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcstr
+PREHOOK: query: insert overwrite table orcstr select null from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@orcstr

hive git commit: HIVE-13330: ORC vectorized string dictionary reader does not differentiate null vs empty string dictionary (Prasanth Jayachandran reviewed by Matt McCline)

2016-04-04 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/branch-2.0 bae499c91 -> 63f53069c


HIVE-13330: ORC vectorized string dictionary reader does not differentiate null 
vs empty string dictionary (Prasanth Jayachandran reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/63f53069
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/63f53069
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/63f53069

Branch: refs/heads/branch-2.0
Commit: 63f53069c438efc868de5c8f9bd8ce35aa229ae2
Parents: bae499c
Author: Prasanth Jayachandran 
Authored: Mon Apr 4 19:33:01 2016 -0500
Committer: Prasanth Jayachandran 
Committed: Mon Apr 4 19:33:36 2016 -0500

--
 .../hive/ql/io/orc/TreeReaderFactory.java   | 20 +--
 .../vector_orc_string_reader_empty_dict.q   | 20 +++
 .../vector_orc_string_reader_empty_dict.q.out   | 62 
 3 files changed, 97 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/63f53069/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
index 2c13d68..92965ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
@@ -1679,6 +1679,7 @@ public class TreeReaderFactory {
    * stripe.
    */
   public static class StringDictionaryTreeReader extends TreeReader {
+    private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
     private DynamicByteArray dictionaryBuffer;
     private int[] dictionaryOffsets;
     protected IntegerReader reader;
@@ -1858,11 +1859,20 @@
         }
         result.isRepeating = scratchlcv.isRepeating;
       } else {
-        // Entire stripe contains null strings.
-        result.isRepeating = true;
-        result.noNulls = false;
-        result.isNull[0] = true;
-        result.setRef(0, "".getBytes(), 0, 0);
+        if (dictionaryOffsets == null) {
+          // Entire stripe contains null strings.
+          result.isRepeating = true;
+          result.noNulls = false;
+          result.isNull[0] = true;
+          result.setRef(0, EMPTY_BYTE_ARRAY, 0, 0);
+        } else {
+          // stripe contains nulls and empty strings
+          for (int i = 0; i < batchSize; i++) {
+            if (!result.isNull[i]) {
+              result.setRef(i, EMPTY_BYTE_ARRAY, 0, 0);
+            }
+          }
+        }
       }
       return result;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/63f53069/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q
--
diff --git 
a/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q 
b/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q
new file mode 100644
index 000..0e8a743
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q
@@ -0,0 +1,20 @@
+create table orcstr (vcol varchar(20)) stored as orc;
+
+insert overwrite table orcstr select null from src;
+
+SET hive.fetch.task.conversion=none;
+
+SET hive.vectorized.execution.enabled=false;
+select vcol from orcstr limit 1;
+
+SET hive.vectorized.execution.enabled=true;
+select vcol from orcstr limit 1;
+
+insert overwrite table orcstr select "" from src;
+
+SET hive.vectorized.execution.enabled=false;
+select vcol from orcstr limit 1;
+
+SET hive.vectorized.execution.enabled=true;
+select vcol from orcstr limit 1;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/63f53069/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out 
b/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out
new file mode 100644
index 000..4f00bed
--- /dev/null
+++ 
b/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out
@@ -0,0 +1,62 @@
+PREHOOK: query: create table orcstr (vcol varchar(20)) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcstr
+POSTHOOK: query: create table orcstr (vcol varchar(20)) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcstr
+PREHOOK: query: insert overwrite table orcstr select null from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: 

hive git commit: HIVE-13330: ORC vectorized string dictionary reader does not differentiate null vs empty string dictionary (Prasanth Jayachandran reviewed by Matt McCline)

2016-04-04 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/branch-1 b6f6c4acb -> 4ac966cd8


HIVE-13330: ORC vectorized string dictionary reader does not differentiate null 
vs empty string dictionary (Prasanth Jayachandran reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4ac966cd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4ac966cd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4ac966cd

Branch: refs/heads/branch-1
Commit: 4ac966cd8ea069f0935919a108acf15d6ec7799b
Parents: b6f6c4a
Author: Prasanth Jayachandran 
Authored: Mon Apr 4 19:39:01 2016 -0500
Committer: Prasanth Jayachandran 
Committed: Mon Apr 4 19:39:01 2016 -0500

--
 .../hive/ql/io/orc/TreeReaderFactory.java   | 20 +--
 .../vector_orc_string_reader_empty_dict.q   | 20 +++
 .../vector_orc_string_reader_empty_dict.q.out   | 62 
 3 files changed, 97 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4ac966cd/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
index c8f9595..96df394 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
@@ -1657,6 +1657,7 @@ public class TreeReaderFactory {
    * stripe.
    */
   protected static class StringDictionaryTreeReader extends TreeReader {
+    private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
     private DynamicByteArray dictionaryBuffer;
     private int[] dictionaryOffsets;
     protected IntegerReader reader;
@@ -1836,11 +1837,20 @@
         }
         result.isRepeating = scratchlcv.isRepeating;
       } else {
-        // Entire stripe contains null strings.
-        result.isRepeating = true;
-        result.noNulls = false;
-        result.isNull[0] = true;
-        result.setRef(0, "".getBytes(), 0, 0);
+        if (dictionaryOffsets == null) {
+          // Entire stripe contains null strings.
+          result.isRepeating = true;
+          result.noNulls = false;
+          result.isNull[0] = true;
+          result.setRef(0, EMPTY_BYTE_ARRAY, 0, 0);
+        } else {
+          // stripe contains nulls and empty strings
+          for (int i = 0; i < batchSize; i++) {
+            if (!result.isNull[i]) {
+              result.setRef(i, EMPTY_BYTE_ARRAY, 0, 0);
+            }
+          }
+        }
       }
       return result;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/4ac966cd/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q
--
diff --git 
a/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q 
b/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q
new file mode 100644
index 000..0e8a743
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_orc_string_reader_empty_dict.q
@@ -0,0 +1,20 @@
+create table orcstr (vcol varchar(20)) stored as orc;
+
+insert overwrite table orcstr select null from src;
+
+SET hive.fetch.task.conversion=none;
+
+SET hive.vectorized.execution.enabled=false;
+select vcol from orcstr limit 1;
+
+SET hive.vectorized.execution.enabled=true;
+select vcol from orcstr limit 1;
+
+insert overwrite table orcstr select "" from src;
+
+SET hive.vectorized.execution.enabled=false;
+select vcol from orcstr limit 1;
+
+SET hive.vectorized.execution.enabled=true;
+select vcol from orcstr limit 1;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/4ac966cd/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out 
b/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out
new file mode 100644
index 000..4f00bed
--- /dev/null
+++ 
b/ql/src/test/results/clientpositive/vector_orc_string_reader_empty_dict.q.out
@@ -0,0 +1,62 @@
+PREHOOK: query: create table orcstr (vcol varchar(20)) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orcstr
+POSTHOOK: query: create table orcstr (vcol varchar(20)) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orcstr
+PREHOOK: query: insert overwrite table orcstr select null from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: 

hive git commit: HIVE-13381 : Timestamp & date should have precedence in type hierarchy than string group (Ashutosh Chauhan via Jason Dere)

2016-04-04 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 983036358 -> b44650231


HIVE-13381 : Timestamp & date should have precedence in type hierarchy than 
string group (Ashutosh Chauhan via Jason Dere)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b4465023
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b4465023
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b4465023

Branch: refs/heads/master
Commit: b44650231ad2708fa73346164ae9c329ad36d6cb
Parents: 9830363
Author: Ashutosh Chauhan 
Authored: Tue Mar 29 19:01:24 2016 -0700
Committer: Ashutosh Chauhan 
Committed: Mon Apr 4 13:11:07 2016 -0700

--
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   9 +-
 .../ql/exec/vector/VectorizationContext.java|  12 +-
 .../hive/ql/exec/TestFunctionRegistry.java  |  18 ++-
 .../exec/vector/TestVectorizationContext.java   |  17 +-
 .../queries/clientpositive/cast_on_constant.q   |   7 +
 .../clientpositive/cast_on_constant.q.out   | 160 +++
 6 files changed, 198 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b4465023/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 56b96b4..1343b39 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -737,7 +737,14 @@ public final class FunctionRegistry {
       return getTypeInfoForPrimitiveCategory(
           (PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b, PrimitiveCategory.STRING);
     }
-
+    // timestamp/date is higher precedence than String_GROUP
+    if (pgA == PrimitiveGrouping.STRING_GROUP && pgB == PrimitiveGrouping.DATE_GROUP) {
+      return b;
+    }
+    // date/timestamp is higher precedence than String_GROUP
+    if (pgB == PrimitiveGrouping.STRING_GROUP && pgA == PrimitiveGrouping.DATE_GROUP) {
+      return a;
+    }
     // Another special case, because timestamp is not implicitly convertible to numeric types.
     if ((pgA == PrimitiveGrouping.NUMERIC_GROUP || pgB == PrimitiveGrouping.NUMERIC_GROUP)
         && (pcA == PrimitiveCategory.TIMESTAMP || pcB == PrimitiveCategory.TIMESTAMP)) {
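
The practical effect of this hunk: when a string is compared with a date or timestamp, the common comparison type now resolves to the date/timestamp type (so the string constant is cast) instead of both sides being compared as strings. A minimal sketch in the spirit of TestFunctionRegistry, assuming hive-exec on the classpath; the class name is illustrative, not part of the commit.

import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ComparisonTypeSketch {
  public static void main(String[] args) {
    // With HIVE-13381, string vs. timestamp resolves to timestamp rather than string.
    TypeInfo common = FunctionRegistry.getCommonClassForComparison(
        TypeInfoFactory.stringTypeInfo, TypeInfoFactory.timestampTypeInfo);
    System.out.println(common);  // expected: timestamp
  }
}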

http://git-wip-us.apache.org/repos/asf/hive/blob/b4465023/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 1eb960d..30a0f5a 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -155,7 +155,7 @@ public class VectorizationContext {
 
   VectorExpressionDescriptor vMap;
 
-  private List initialColumnNames;
+  private final List initialColumnNames;
 
   private List projectedColumns;
   private List projectionColumnNames;
@@ -712,7 +712,7 @@ public class VectorizationContext {
 genericUdf = new GenericUDFToDate();
 break;
   case TIMESTAMP:
-genericUdf = new GenericUDFToUnixTimeStamp();
+genericUdf = new GenericUDFTimestamp();
 break;
   case INTERVAL_YEAR_MONTH:
 genericUdf = new GenericUDFToIntervalYearMonth();
@@ -1329,7 +1329,7 @@ public class VectorizationContext {
 case INT:
 case LONG:
   return InConstantType.INT_FAMILY;
-  
+
 case DATE:
   return InConstantType.TIMESTAMP;
 
@@ -1339,16 +1339,16 @@ public class VectorizationContext {
 case FLOAT:
 case DOUBLE:
   return InConstantType.FLOAT_FAMILY;
-  
+
 case STRING:
 case CHAR:
 case VARCHAR:
 case BINARY:
   return InConstantType.STRING_FAMILY;
-  
+
 case DECIMAL:
   return InConstantType.DECIMAL;
-  
+
 
 case INTERVAL_YEAR_MONTH:
 case INTERVAL_DAY_TIME:

http://git-wip-us.apache.org/repos/asf/hive/blob/b4465023/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java 
b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
index 6a83c32..8488c21 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
@@ -253,9 

[2/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote
--
diff --git a/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote 
b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote
new file mode 100755
index 000..9a2322f
--- /dev/null
+++ b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote
@@ -0,0 +1,1242 @@
+#!/usr/bin/env python
+#
+# Autogenerated by Thrift Compiler (0.9.3)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+import sys
+import pprint
+from urlparse import urlparse
+from thrift.transport import TTransport
+from thrift.transport import TSocket
+from thrift.transport import TSSLSocket
+from thrift.transport import THttpClient
+from thrift.protocol import TBinaryProtocol
+
+from hive_service import ThriftHive
+from hive_service.ttypes import *
+
+if len(sys.argv) <= 1 or sys.argv[1] == '--help':
+  print('')
+  print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] 
[-s[sl]] function [arg1 [arg2...]]')
+  print('')
+  print('Functions:')
+  print('  void execute(string query)')
+  print('  string fetchOne()')
+  print('   fetchN(i32 numRows)')
+  print('   fetchAll()')
+  print('  Schema getSchema()')
+  print('  Schema getThriftSchema()')
+  print('  HiveClusterStatus getClusterStatus()')
+  print('  QueryPlan getQueryPlan()')
+  print('  void clean()')
+  print('  string getMetaConf(string key)')
+  print('  void setMetaConf(string key, string value)')
+  print('  void create_database(Database database)')
+  print('  Database get_database(string name)')
+  print('  void drop_database(string name, bool deleteData, bool cascade)')
+  print('   get_databases(string pattern)')
+  print('   get_all_databases()')
+  print('  void alter_database(string dbname, Database db)')
+  print('  Type get_type(string name)')
+  print('  bool create_type(Type type)')
+  print('  bool drop_type(string type)')
+  print('   get_type_all(string name)')
+  print('   get_fields(string db_name, string table_name)')
+  print('   get_fields_with_environment_context(string db_name, string 
table_name, EnvironmentContext environment_context)')
+  print('   get_schema(string db_name, string table_name)')
+  print('   get_schema_with_environment_context(string db_name, string 
table_name, EnvironmentContext environment_context)')
+  print('  void create_table(Table tbl)')
+  print('  void create_table_with_environment_context(Table tbl, 
EnvironmentContext environment_context)')
+  print('  void drop_table(string dbname, string name, bool deleteData)')
+  print('  void drop_table_with_environment_context(string dbname, string 
name, bool deleteData, EnvironmentContext environment_context)')
+  print('   get_tables(string db_name, string pattern)')
+  print('   get_table_meta(string db_patterns, string tbl_patterns,  
tbl_types)')
+  print('   get_all_tables(string db_name)')
+  print('  Table get_table(string dbname, string tbl_name)')
+  print('   get_table_objects_by_name(string dbname,  tbl_names)')
+  print('   get_table_names_by_filter(string dbname, string filter, i16 
max_tables)')
+  print('  void alter_table(string dbname, string tbl_name, Table new_tbl)')
+  print('  void alter_table_with_environment_context(string dbname, string 
tbl_name, Table new_tbl, EnvironmentContext environment_context)')
+  print('  void alter_table_with_cascade(string dbname, string tbl_name, Table 
new_tbl, bool cascade)')
+  print('  Partition add_partition(Partition new_part)')
+  print('  Partition add_partition_with_environment_context(Partition 
new_part, EnvironmentContext environment_context)')
+  print('  i32 add_partitions( new_parts)')
+  print('  i32 add_partitions_pspec( new_parts)')
+  print('  Partition append_partition(string db_name, string tbl_name,  
part_vals)')
+  print('  AddPartitionsResult add_partitions_req(AddPartitionsRequest 
request)')
+  print('  Partition append_partition_with_environment_context(string db_name, 
string tbl_name,  part_vals, EnvironmentContext environment_context)')
+  print('  Partition append_partition_by_name(string db_name, string tbl_name, 
string part_name)')
+  print('  Partition append_partition_by_name_with_environment_context(string 
db_name, string tbl_name, string part_name, EnvironmentContext 
environment_context)')
+  print('  bool drop_partition(string db_name, string tbl_name,  part_vals, 
bool deleteData)')
+  print('  bool drop_partition_with_environment_context(string db_name, string 
tbl_name,  part_vals, bool deleteData, EnvironmentContext environment_context)')
+  print('  bool drop_partition_by_name(string db_name, string tbl_name, string 
part_name, bool deleteData)')
+  print('  bool drop_partition_by_name_with_environment_context(string 
db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext 

[1/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 6a1f8a835 -> 983036358


http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py
--
diff --git a/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py 
b/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py
new file mode 100644
index 000..978c2a3
--- /dev/null
+++ b/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py
@@ -0,0 +1,1674 @@
+#
+# Autogenerated by Thrift Compiler (0.9.3)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TException, 
TApplicationException
+import hive_metastore.ThriftHiveMetastore
+import logging
+from ttypes import *
+from thrift.Thrift import TProcessor
+from thrift.transport import TTransport
+from thrift.protocol import TBinaryProtocol, TProtocol
+try:
+  from thrift.protocol import fastbinary
+except:
+  fastbinary = None
+
+
+class Iface(hive_metastore.ThriftHiveMetastore.Iface):
+  def execute(self, query):
+"""
+Parameters:
+ - query
+"""
+pass
+
+  def fetchOne(self):
+pass
+
+  def fetchN(self, numRows):
+"""
+Parameters:
+ - numRows
+"""
+pass
+
+  def fetchAll(self):
+pass
+
+  def getSchema(self):
+pass
+
+  def getThriftSchema(self):
+pass
+
+  def getClusterStatus(self):
+pass
+
+  def getQueryPlan(self):
+pass
+
+  def clean(self):
+pass
+
+
+class Client(hive_metastore.ThriftHiveMetastore.Client, Iface):
+  def __init__(self, iprot, oprot=None):
+hive_metastore.ThriftHiveMetastore.Client.__init__(self, iprot, oprot)
+
+  def execute(self, query):
+"""
+Parameters:
+ - query
+"""
+self.send_execute(query)
+self.recv_execute()
+
+  def send_execute(self, query):
+self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid)
+args = execute_args()
+args.query = query
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_execute(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = execute_result()
+result.read(iprot)
+iprot.readMessageEnd()
+if result.ex is not None:
+  raise result.ex
+return
+
+  def fetchOne(self):
+self.send_fetchOne()
+return self.recv_fetchOne()
+
+  def send_fetchOne(self):
+self._oprot.writeMessageBegin('fetchOne', TMessageType.CALL, self._seqid)
+args = fetchOne_args()
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_fetchOne(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = fetchOne_result()
+result.read(iprot)
+iprot.readMessageEnd()
+if result.success is not None:
+  return result.success
+if result.ex is not None:
+  raise result.ex
+raise TApplicationException(TApplicationException.MISSING_RESULT, 
"fetchOne failed: unknown result")
+
+  def fetchN(self, numRows):
+"""
+Parameters:
+ - numRows
+"""
+self.send_fetchN(numRows)
+return self.recv_fetchN()
+
+  def send_fetchN(self, numRows):
+self._oprot.writeMessageBegin('fetchN', TMessageType.CALL, self._seqid)
+args = fetchN_args()
+args.numRows = numRows
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_fetchN(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = fetchN_result()
+result.read(iprot)
+iprot.readMessageEnd()
+if result.success is not None:
+  return result.success
+if result.ex is not None:
+  raise result.ex
+raise TApplicationException(TApplicationException.MISSING_RESULT, "fetchN 
failed: unknown result")
+
+  def fetchAll(self):
+self.send_fetchAll()
+return self.recv_fetchAll()
+
+  def send_fetchAll(self):
+self._oprot.writeMessageBegin('fetchAll', TMessageType.CALL, self._seqid)
+args = fetchAll_args()
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_fetchAll(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = fetchAll_result()
+

[4/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
--
diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
new file mode 100644
index 000..934a8a5
--- /dev/null
+++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
@@ -0,0 +1,7784 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.service;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class ThriftHive {
+
+  public interface Iface extends 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface {
+
+public void execute(String query) throws HiveServerException, 
org.apache.thrift.TException;
+
+public String fetchOne() throws HiveServerException, 
org.apache.thrift.TException;
+
+public List fetchN(int numRows) throws HiveServerException, 
org.apache.thrift.TException;
+
+public List fetchAll() throws HiveServerException, 
org.apache.thrift.TException;
+
+public org.apache.hadoop.hive.metastore.api.Schema getSchema() throws 
HiveServerException, org.apache.thrift.TException;
+
+public org.apache.hadoop.hive.metastore.api.Schema getThriftSchema() 
throws HiveServerException, org.apache.thrift.TException;
+
+public HiveClusterStatus getClusterStatus() throws HiveServerException, 
org.apache.thrift.TException;
+
+public org.apache.hadoop.hive.ql.plan.api.QueryPlan getQueryPlan() throws 
HiveServerException, org.apache.thrift.TException;
+
+public void clean() throws org.apache.thrift.TException;
+
+  }
+
+  public interface AsyncIface extends 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore .AsyncIface {
+
+public void execute(String query, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void fetchOne(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void fetchN(int numRows, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void fetchAll(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getSchema(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getThriftSchema(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getClusterStatus(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getQueryPlan(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void clean(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+  }
+
+  public static class Client extends 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Client implements 
Iface {
+public static class Factory implements 
org.apache.thrift.TServiceClientFactory {
+  public Factory() {}
+  public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
+return new Client(prot);
+  }
+  public Client getClient(org.apache.thrift.protocol.TProtocol iprot, 
org.apache.thrift.protocol.TProtocol oprot) {
+return new Client(iprot, oprot);
+  }
+}
+
+public Client(org.apache.thrift.protocol.TProtocol prot)
+{
+  super(prot, prot);
+}
+
+public Client(org.apache.thrift.protocol.TProtocol iprot, 
org.apache.thrift.protocol.TProtocol oprot) {
+  super(iprot, oprot);
+}
+
+public void execute(String query) throws 

[8/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, 
reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/98303635
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/98303635
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/98303635

Branch: refs/heads/master
Commit: 983036358633cfbb6aec30003faac8280372b2c9
Parents: 6a1f8a8
Author: Wei Zheng 
Authored: Mon Apr 4 11:18:25 2016 -0700
Committer: Wei Zheng 
Committed: Mon Apr 4 11:18:25 2016 -0700

--
 service-rpc/src/gen/thrift/gen-py/__init__.py   |0
 service/src/gen/thrift/gen-cpp/ThriftHive.cpp   | 3544 
 service/src/gen/thrift/gen-cpp/ThriftHive.h | 1224 +++
 .../gen-cpp/ThriftHive_server.skeleton.cpp  |   84 +
 .../thrift/gen-cpp/hive_service_constants.cpp   |   17 +
 .../gen/thrift/gen-cpp/hive_service_constants.h |   24 +
 .../gen/thrift/gen-cpp/hive_service_types.cpp   |  351 +
 .../src/gen/thrift/gen-cpp/hive_service_types.h |  176 +
 .../hadoop/hive/service/HiveClusterStatus.java  |  901 ++
 .../hive/service/HiveServerException.java   |  601 ++
 .../hadoop/hive/service/JobTrackerState.java|   45 +
 .../apache/hadoop/hive/service/ThriftHive.java  | 7784 ++
 service/src/gen/thrift/gen-php/ThriftHive.php   | 1943 +
 service/src/gen/thrift/gen-php/Types.php|  338 +
 service/src/gen/thrift/gen-py/__init__.py   |0
 .../gen-py/hive_service/ThriftHive-remote   | 1242 +++
 .../thrift/gen-py/hive_service/ThriftHive.py| 1674 
 .../gen/thrift/gen-py/hive_service/__init__.py  |1 +
 .../gen/thrift/gen-py/hive_service/constants.py |   11 +
 .../gen/thrift/gen-py/hive_service/ttypes.py|  260 +
 .../gen/thrift/gen-rb/hive_service_constants.rb |9 +
 .../src/gen/thrift/gen-rb/hive_service_types.rb |   68 +
 service/src/gen/thrift/gen-rb/thrift_hive.rb|  555 ++
 23 files changed, 20852 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service-rpc/src/gen/thrift/gen-py/__init__.py
--
diff --git a/service-rpc/src/gen/thrift/gen-py/__init__.py 
b/service-rpc/src/gen/thrift/gen-py/__init__.py
new file mode 100644
index 000..e69de29
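
The regenerated files listed above restore the legacy HiveServer1 Thrift bindings (C++, Java, PHP, Python, Ruby). For orientation, here is a minimal, illustrative sketch of using the regenerated Java client, assuming a Thrift 0.9.3 runtime and a legacy HiveServer listening on localhost:10000; the host, port, and query are invented for the example and are not part of the commit.

import java.util.List;
import org.apache.hadoop.hive.service.ThriftHive;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ThriftHiveClientSketch {
  public static void main(String[] args) throws Exception {
    TTransport transport = new TSocket("localhost", 10000);
    transport.open();
    // ThriftHive.Client extends ThriftHiveMetastore.Client, as shown in the generated Iface above.
    ThriftHive.Client client = new ThriftHive.Client(new TBinaryProtocol(transport));
    client.execute("SELECT COUNT(*) FROM src");
    List<String> rows = client.fetchAll();
    for (String row : rows) {
      System.out.println(row);
    }
    transport.close();
  }
}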



[6/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-cpp/ThriftHive.h
--
diff --git a/service/src/gen/thrift/gen-cpp/ThriftHive.h 
b/service/src/gen/thrift/gen-cpp/ThriftHive.h
new file mode 100644
index 000..902bd4b
--- /dev/null
+++ b/service/src/gen/thrift/gen-cpp/ThriftHive.h
@@ -0,0 +1,1224 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#ifndef ThriftHive_H
+#define ThriftHive_H
+
+#include 
+#include 
+#include "hive_service_types.h"
+#include "ThriftHiveMetastore.h"
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+#ifdef _WIN32
+  #pragma warning( push )
+  #pragma warning (disable : 4250 ) //inheriting methods via dominance 
+#endif
+
+class ThriftHiveIf : virtual public  
::Apache::Hadoop::Hive::ThriftHiveMetastoreIf {
+ public:
+  virtual ~ThriftHiveIf() {}
+  virtual void execute(const std::string& query) = 0;
+  virtual void fetchOne(std::string& _return) = 0;
+  virtual void fetchN(std::vector & _return, const int32_t 
numRows) = 0;
+  virtual void fetchAll(std::vector & _return) = 0;
+  virtual void getSchema( ::Apache::Hadoop::Hive::Schema& _return) = 0;
+  virtual void getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return) = 0;
+  virtual void getClusterStatus(HiveClusterStatus& _return) = 0;
+  virtual void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return) = 0;
+  virtual void clean() = 0;
+};
+
+class ThriftHiveIfFactory : virtual public  
::Apache::Hadoop::Hive::ThriftHiveMetastoreIfFactory {
+ public:
+  typedef ThriftHiveIf Handler;
+
+  virtual ~ThriftHiveIfFactory() {}
+
+  virtual ThriftHiveIf* getHandler(const ::apache::thrift::TConnectionInfo& 
connInfo) = 0;
+  virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* 
handler */) = 0;
+};
+
+class ThriftHiveIfSingletonFactory : virtual public ThriftHiveIfFactory {
+ public:
+  ThriftHiveIfSingletonFactory(const boost::shared_ptr& iface) : 
iface_(iface) {}
+  virtual ~ThriftHiveIfSingletonFactory() {}
+
+  virtual ThriftHiveIf* getHandler(const ::apache::thrift::TConnectionInfo&) {
+return iface_.get();
+  }
+  virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* 
handler */) {}
+
+ protected:
+  boost::shared_ptr iface_;
+};
+
+class ThriftHiveNull : virtual public ThriftHiveIf , virtual public  
::Apache::Hadoop::Hive::ThriftHiveMetastoreNull {
+ public:
+  virtual ~ThriftHiveNull() {}
+  void execute(const std::string& /* query */) {
+return;
+  }
+  void fetchOne(std::string& /* _return */) {
+return;
+  }
+  void fetchN(std::vector & /* _return */, const int32_t /* 
numRows */) {
+return;
+  }
+  void fetchAll(std::vector & /* _return */) {
+return;
+  }
+  void getSchema( ::Apache::Hadoop::Hive::Schema& /* _return */) {
+return;
+  }
+  void getThriftSchema( ::Apache::Hadoop::Hive::Schema& /* _return */) {
+return;
+  }
+  void getClusterStatus(HiveClusterStatus& /* _return */) {
+return;
+  }
+  void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& /* _return */) {
+return;
+  }
+  void clean() {
+return;
+  }
+};
+
+typedef struct _ThriftHive_execute_args__isset {
+  _ThriftHive_execute_args__isset() : query(false) {}
+  bool query :1;
+} _ThriftHive_execute_args__isset;
+
+class ThriftHive_execute_args {
+ public:
+
+  ThriftHive_execute_args(const ThriftHive_execute_args&);
+  ThriftHive_execute_args& operator=(const ThriftHive_execute_args&);
+  ThriftHive_execute_args() : query() {
+  }
+
+  virtual ~ThriftHive_execute_args() throw();
+  std::string query;
+
+  _ThriftHive_execute_args__isset __isset;
+
+  void __set_query(const std::string& val);
+
+  bool operator == (const ThriftHive_execute_args & rhs) const
+  {
+if (!(query == rhs.query))
+  return false;
+return true;
+  }
+  bool operator != (const ThriftHive_execute_args ) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHive_execute_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHive_execute_pargs {
+ public:
+
+
+  virtual ~ThriftHive_execute_pargs() throw();
+  const std::string* query;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHive_execute_result__isset {
+  _ThriftHive_execute_result__isset() : ex(false) {}
+  bool ex :1;
+} _ThriftHive_execute_result__isset;
+
+class ThriftHive_execute_result {
+ public:
+
+  ThriftHive_execute_result(const ThriftHive_execute_result&);
+  ThriftHive_execute_result& operator=(const ThriftHive_execute_result&);
+  ThriftHive_execute_result() {
+  }
+
+  virtual ~ThriftHive_execute_result() throw();
+  HiveServerException ex;
+
+  _ThriftHive_execute_result__isset __isset;
+
+  void __set_ex(const 

[7/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-cpp/ThriftHive.cpp
--
diff --git a/service/src/gen/thrift/gen-cpp/ThriftHive.cpp 
b/service/src/gen/thrift/gen-cpp/ThriftHive.cpp
new file mode 100644
index 000..a5448f0
--- /dev/null
+++ b/service/src/gen/thrift/gen-cpp/ThriftHive.cpp
@@ -0,0 +1,3544 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#include "ThriftHive.h"
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+
+ThriftHive_execute_args::~ThriftHive_execute_args() throw() {
+}
+
+
+uint32_t ThriftHive_execute_args::read(::apache::thrift::protocol::TProtocol* 
iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRING) {
+  xfer += iprot->readString(this->query);
+  this->__isset.query = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHive_execute_args::write(::apache::thrift::protocol::TProtocol* 
oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHive_execute_args");
+
+  xfer += oprot->writeFieldBegin("query", 
::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->query);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHive_execute_pargs::~ThriftHive_execute_pargs() throw() {
+}
+
+
+uint32_t 
ThriftHive_execute_pargs::write(::apache::thrift::protocol::TProtocol* oprot) 
const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHive_execute_pargs");
+
+  xfer += oprot->writeFieldBegin("query", 
::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->query)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHive_execute_result::~ThriftHive_execute_result() throw() {
+}
+
+
+uint32_t 
ThriftHive_execute_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->ex.read(iprot);
+  this->__isset.ex = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t 
ThriftHive_execute_result::write(::apache::thrift::protocol::TProtocol* oprot) 
const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHive_execute_result");
+
+  if (this->__isset.ex) {
+xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 
1);
+xfer += this->ex.write(oprot);
+xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHive_execute_presult::~ThriftHive_execute_presult() throw() {
+}
+
+
+uint32_t 
ThriftHive_execute_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == 

[3/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-php/ThriftHive.php
--
diff --git a/service/src/gen/thrift/gen-php/ThriftHive.php 
b/service/src/gen/thrift/gen-php/ThriftHive.php
new file mode 100644
index 000..23dc8fd
--- /dev/null
+++ b/service/src/gen/thrift/gen-php/ThriftHive.php
@@ -0,0 +1,1943 @@
+send_execute($query);
+$this->recv_execute();
+  }
+
+  public function send_execute($query)
+  {
+$args = new \ThriftHive_execute_args();
+$args->query = $query;
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'execute', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('execute', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_execute()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\ThriftHive_execute_result', $this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == TMessageType::EXCEPTION) {
+$x = new TApplicationException();
+$x->read($this->input_);
+$this->input_->readMessageEnd();
+throw $x;
+  }
+  $result = new \ThriftHive_execute_result();
+  $result->read($this->input_);
+  $this->input_->readMessageEnd();
+}
+if ($result->ex !== null) {
+  throw $result->ex;
+}
+return;
+  }
+
+  public function fetchOne()
+  {
+$this->send_fetchOne();
+return $this->recv_fetchOne();
+  }
+
+  public function send_fetchOne()
+  {
+$args = new \ThriftHive_fetchOne_args();
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'fetchOne', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('fetchOne', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_fetchOne()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\ThriftHive_fetchOne_result', $this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == TMessageType::EXCEPTION) {
+$x = new TApplicationException();
+$x->read($this->input_);
+$this->input_->readMessageEnd();
+throw $x;
+  }
+  $result = new \ThriftHive_fetchOne_result();
+  $result->read($this->input_);
+  $this->input_->readMessageEnd();
+}
+if ($result->success !== null) {
+  return $result->success;
+}
+if ($result->ex !== null) {
+  throw $result->ex;
+}
+throw new \Exception("fetchOne failed: unknown result");
+  }
+
+  public function fetchN($numRows)
+  {
+$this->send_fetchN($numRows);
+return $this->recv_fetchN();
+  }
+
+  public function send_fetchN($numRows)
+  {
+$args = new \ThriftHive_fetchN_args();
+$args->numRows = $numRows;
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'fetchN', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('fetchN', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_fetchN()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\ThriftHive_fetchN_result', $this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == 

[5/8] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
--
diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
new file mode 100644
index 000..97b1219
--- /dev/null
+++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
@@ -0,0 +1,601 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.service;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class HiveServerException extends TException implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("HiveServerException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = 
new org.apache.thrift.protocol.TField("message", 
org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC 
= new org.apache.thrift.protocol.TField("errorCode", 
org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField SQLSTATE_FIELD_DESC = 
new org.apache.thrift.protocol.TField("SQLState", 
org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+schemes.put(StandardScheme.class, new 
HiveServerExceptionStandardSchemeFactory());
+schemes.put(TupleScheme.class, new 
HiveServerExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+  private int errorCode; // required
+  private String SQLState; // required
+
+  /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+MESSAGE((short)1, "message"),
+ERROR_CODE((short)2, "errorCode"),
+SQLSTATE((short)3, "SQLState");
+
+private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+static {
+  for (_Fields field : EnumSet.allOf(_Fields.class)) {
+byName.put(field.getFieldName(), field);
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, or null if its not 
found.
+ */
+public static _Fields findByThriftId(int fieldId) {
+  switch(fieldId) {
+case 1: // MESSAGE
+  return MESSAGE;
+case 2: // ERROR_CODE
+  return ERROR_CODE;
+case 3: // SQLSTATE
+  return SQLSTATE;
+default:
+  return null;
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+public static _Fields findByThriftIdOrThrow(int fieldId) {
+  _Fields fields = findByThriftId(fieldId);
+  if (fields == null) throw new IllegalArgumentException("Field " + 
fieldId + " doesn't exist!");
+  return fields;
+}
+
+/**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+public static _Fields findByName(String name) {
+  return byName.get(name);
+}
+
+private final short _thriftId;
+private final String _fieldName;
+
+_Fields(short thriftId, String fieldName) {
+  _thriftId = thriftId;
+  _fieldName = fieldName;
+}
+
+public short getThriftFieldId() {
+  return _thriftId;
+}
+
+public String getFieldName() {
+  return _fieldName;
+}
+  }
+
+  // isset id assignments
+  private 

[41/50] [abbrv] hive git commit: HIVE-12988 : Improve dynamic partition loading IV (Ashutosh Chauhan via Prasanth J)

2016-04-04 Thread jdere
HIVE-12988 : Improve dynamic partition loading IV (Ashutosh Chauhan via 
Prasanth J)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a14ef8ab
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a14ef8ab
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a14ef8ab

Branch: refs/heads/llap
Commit: a14ef8abe1df1516b8b9f486030bc3d584f940a9
Parents: 1de97bc
Author: Ashutosh Chauhan 
Authored: Tue Feb 2 18:03:44 2016 -0800
Committer: Ashutosh Chauhan 
Committed: Tue Mar 29 11:27:12 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   4 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java| 252 +++
 .../org/apache/hadoop/fs/ProxyFileSystem.java   |   5 +-
 3 files changed, 155 insertions(+), 106 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a14ef8ab/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index b8870f2..f03c1ab 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2326,6 +2326,8 @@ public class HiveConf extends Configuration {
 HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist", 
"set,reset,dfs,add,list,delete,reload,compile",
 "Comma separated list of non-SQL Hive commands users are authorized to 
execute"),
 
+ HIVE_MOVE_FILES_THREAD_COUNT("hive.mv.files.thread", 25, new  
SizeValidator(1L, true, 1024L, true), "Number of threads"
+ + " used to move files in move task"),
 // If this is set all move tasks at the end of a multi-insert query will 
only begin once all
 // outputs are ready
 HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES(
@@ -2771,7 +2773,7 @@ public class HiveConf extends Configuration {
 SPARK_RPC_SASL_MECHANISM("hive.spark.client.rpc.sasl.mechanisms", 
"DIGEST-MD5",
   "Name of the SASL mechanism to use for authentication."),
 SPARK_RPC_SERVER_ADDRESS("hive.spark.client.rpc.server.address", "",
-  "The server address of HiverServer2 host to be used for communication 
between Hive client and remote Spark driver. " + 
+  "The server address of HiverServer2 host to be used for communication 
between Hive client and remote Spark driver. " +
   "Default is empty, which means the address will be determined in the 
same way as for hive.server2.thrift.bind.host." +
   "This is only necessary if the host has mutiple network addresses and if 
a different network address other than " +
   "hive.server2.thrift.bind.host is to be used."),

http://git-wip-us.apache.org/repos/asf/hive/blob/a14ef8ab/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 6d27f55..c27481f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -32,19 +32,25 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.collect.ImmutableMap;
+
 import javax.jdo.JDODataStoreException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -132,6 +138,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * This class has functions that implement meta data/DDL operations using calls
@@ -1504,7 +1511,7 @@ public class Hive {
 isSrcLocal);
   } else {
 if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && 
!tbl.isTemporary() && oldPart != null) {
-  newFiles = new ArrayList<>();
+  newFiles = Collections.synchronizedList(new ArrayList());
 }
 
 FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
@@ -1751,9 +1758,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
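
The imports and the new hive.mv.files.thread setting added above indicate that Hive.java now dispatches per-file moves to a thread pool during dynamic partition loading, collecting results into a synchronized list. A minimal, self-contained sketch of that pattern follows; it is not the actual Hive code, and the moveOne helper and hard-coded paths are illustrative only.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelMoveSketch {

  // Hypothetical stand-in for the per-file copy/rename work.
  static String moveOne(String src, String dst) {
    return src + " -> " + dst;
  }

  public static void main(String[] args) throws Exception {
    int poolSize = 25; // analogous to the new hive.mv.files.thread default
    ExecutorService pool = Executors.newFixedThreadPool(poolSize);
    // Synchronized list, since many worker threads append to it concurrently.
    List<String> newFiles = Collections.synchronizedList(new ArrayList<String>());

    List<Future<?>> futures = new ArrayList<>();
    for (String f : new String[] {"part-00000", "part-00001", "part-00002"}) {
      futures.add(pool.submit(() -> {
        newFiles.add(moveOne("/staging/" + f, "/warehouse/" + f));
      }));
    }
    for (Future<?> fut : futures) {
      fut.get(); // propagate any failure from the move tasks
    }
    pool.shutdown();
    System.out.println(newFiles);
  }
}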

[48/50] [abbrv] hive git commit: Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/hive

2016-04-04 Thread jdere
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/hive


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/51efcb80
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/51efcb80
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/51efcb80

Branch: refs/heads/llap
Commit: 51efcb80e1d09e490dab644bcad2cab54c99e353
Parents: 39d66a4 8c1f055
Author: Dmitry Tolpeko 
Authored: Wed Mar 30 00:23:51 2016 -0700
Committer: Dmitry Tolpeko 
Committed: Wed Mar 30 00:23:51 2016 -0700

--
 .../apache/hadoop/hive/ant/GenVectorCode.java   |  531 -
 .../apache/hadoop/hive/common/FileUtils.java|   54 +
 .../hive/common/type/HiveIntervalDayTime.java   |  245 
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   15 +-
 .../org/apache/hive/common/util/DateUtils.java  |   19 -
 .../hive/contrib/serde2/MultiDelimitSerDe.java  |   23 +-
 data/files/alltypesorc3xcols|  Bin 0 -> 1504592 bytes
 data/files/timestamps.txt   |   50 +
 .../hive/hcatalog/streaming/HiveEndPoint.java   |   11 +
 .../hcatalog/templeton/SecureProxySupport.java  |   46 +-
 .../listener/TestDbNotificationListener.java|   18 +
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   |   56 +-
 .../hive/metastore/TestHiveMetaStore.java   |   20 +-
 .../jdbc/TestJdbcWithLocalClusterSpark.java |2 +-
 .../apache/hive/jdbc/TestJdbcWithMiniMr.java|2 +-
 ...stMultiSessionsHS2WithLocalClusterSpark.java |6 +-
 .../jdbc/TestServiceDiscoveryWithMiniHS2.java   |  132 +++
 .../jdbc/authorization/TestHS2AuthzContext.java |4 +-
 .../authorization/TestJdbcMetadataApiAuth.java  |2 +-
 .../TestJdbcWithSQLAuthorization.java   |2 +-
 .../test/resources/testconfiguration.properties |5 +
 .../hive/jdbc/ZooKeeperHiveClientHelper.java|   21 +-
 .../hadoop/hive/llap/io/api/LlapProxy.java  |2 +
 .../org/apache/hadoop/hive/llap/LlapUtil.java   |   26 +
 .../apache/hadoop/hive/llap/tez/Converters.java |1 +
 .../hadoop/hive/llap/tez/TestConverters.java|  190 +++
 .../llap/IncrementalObjectSizeEstimator.java|   54 +-
 .../hadoop/hive/llap/cache/LlapDataBuffer.java  |   12 +-
 .../hive/llap/cache/LowLevelCacheImpl.java  |   35 +-
 .../llap/cache/LowLevelCacheMemoryManager.java  |6 +-
 .../llap/cache/LowLevelFifoCachePolicy.java |4 +-
 .../llap/cache/LowLevelLrfuCachePolicy.java |   14 +-
 .../hive/llap/cache/SimpleBufferManager.java|8 +-
 .../hive/llap/daemon/impl/LlapDaemon.java   |6 +-
 .../hive/llap/io/api/impl/LlapInputFormat.java  |   32 +-
 .../hive/llap/io/api/impl/LlapIoImpl.java   |   21 +-
 .../llap/io/decode/OrcColumnVectorProducer.java |4 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |   95 +-
 .../hadoop/hive/llap/old/BufferInProgress.java  |   82 --
 .../apache/hadoop/hive/llap/old/BufferPool.java |  225 
 .../hadoop/hive/llap/old/CachePolicy.java   |   34 -
 .../apache/hadoop/hive/llap/old/ChunkPool.java  |  237 
 .../resources/llap-daemon-log4j2.properties |   14 +-
 .../hive/metastore/MetaStoreDirectSql.java  |   32 +-
 .../hadoop/hive/metastore/ObjectStore.java  |2 +-
 .../hive/metastore/StatObjectConverter.java |2 +-
 .../hadoop/hive/metastore/hbase/HBaseUtils.java |8 +-
 .../hadoop/hive/metastore/hbase/StatsCache.java |   20 +-
 .../stats/BinaryColumnStatsAggregator.java  |   43 +-
 .../stats/BooleanColumnStatsAggregator.java |   42 +-
 .../hbase/stats/ColumnStatsAggregator.java  |   12 +-
 .../stats/ColumnStatsAggregatorFactory.java |8 +-
 .../stats/DecimalColumnStatsAggregator.java |  340 +-
 .../stats/DoubleColumnStatsAggregator.java  |  307 -
 .../hbase/stats/IExtrapolatePartStatus.java |   30 +
 .../hbase/stats/LongColumnStatsAggregator.java  |  305 -
 .../stats/StringColumnStatsAggregator.java  |   85 +-
 ...stHBaseAggregateStatsCacheWithBitVector.java |6 +-
 .../TestHBaseAggregateStatsExtrapolation.java   |  717 
 .../TestHBaseAggregateStatsNDVUniformDist.java  |  581 ++
 orc/src/java/org/apache/orc/OrcFile.java|   21 +-
 .../java/org/apache/orc/impl/WriterImpl.java|   41 +-
 ...eColumnArithmeticIntervalYearMonthColumn.txt |   56 +-
 ...eColumnArithmeticIntervalYearMonthScalar.txt |   55 +-
 .../DateColumnArithmeticTimestampColumn.txt |  141 ++-
 .../DateColumnArithmeticTimestampColumnBase.txt |  171 ---
 .../DateColumnArithmeticTimestampScalar.txt |  113 +-
 .../DateColumnArithmeticTimestampScalarBase.txt |  137 ---
 ...eScalarArithmeticIntervalYearMonthColumn.txt |   53 +-
 .../DateScalarArithmeticTimestampColumn.txt |  108 +-
 .../DateScalarArithmeticTimestampColumnBase.txt |  147 ---
 ...ayTimeColumnCompareIntervalDayTimeColumn.txt |   52 -
 

[38/50] [abbrv] hive git commit: revert HIVE-12531 : Implement fast-path for Year/Month UDFs for dates between 1999 and 2038 (Jason Dere via Sergey Shelukhin)

2016-04-04 Thread jdere
revert HIVE-12531 : Implement fast-path for Year/Month UDFs for dates between 
1999 and 2038 (Jason Dere via Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/09b00fc8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/09b00fc8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/09b00fc8

Branch: refs/heads/llap
Commit: 09b00fc863d19cf513fe1d188bb671f370f64c2d
Parents: ff10f03
Author: Jason Dere 
Authored: Tue Mar 29 11:16:32 2016 -0700
Committer: Jason Dere 
Committed: Tue Mar 29 11:16:32 2016 -0700

--
 .../org/apache/hadoop/hive/ql/udf/UDFMonth.java | 16 
 .../java/org/apache/hadoop/hive/ql/udf/UDFYear.java | 16 
 .../expressions/TestVectorDateExpressions.java  | 13 +++--
 3 files changed, 19 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/09b00fc8/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java
index 05afb8e..8c2b0e4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hive.ql.udf;
 
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
+import java.util.Calendar;
 import java.util.Date;
-import org.joda.time.MutableDateTime;
 
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
@@ -53,7 +53,7 @@ import org.apache.hadoop.io.Text;
 @NDV(maxNdv = 31)
 public class UDFMonth extends UDF {
   private final SimpleDateFormat formatter = new 
SimpleDateFormat("-MM-dd");
-  private transient final MutableDateTime mdt = new MutableDateTime();
+  private final Calendar calendar = Calendar.getInstance();
 
   private final IntWritable result = new IntWritable();
 
@@ -75,8 +75,8 @@ public class UDFMonth extends UDF {
 }
 try {
   Date date = formatter.parse(dateString.toString());
-  mdt.setMillis(date.getTime());
-  result.set(mdt.getMonthOfYear());
+  calendar.setTime(date);
+  result.set(1 + calendar.get(Calendar.MONTH));
   return result;
 } catch (ParseException e) {
   return null;
@@ -88,8 +88,8 @@ public class UDFMonth extends UDF {
   return null;
 }
 
-mdt.setMillis(d.get().getTime());
-result.set(mdt.getMonthOfYear());
+calendar.setTime(d.get());
+result.set(1 + calendar.get(Calendar.MONTH));
 return result;
   }
 
@@ -98,8 +98,8 @@ public class UDFMonth extends UDF {
   return null;
 }
 
-mdt.setMillis(t.getTimestamp().getTime());
-result.set(mdt.getMonthOfYear());
+calendar.setTime(t.getTimestamp());
+result.set(1 + calendar.get(Calendar.MONTH));
 return result;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/09b00fc8/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java
index fb3a655..d7ecd8c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hive.ql.udf;
 
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
+import java.util.Calendar;
 import java.util.Date;
-import org.joda.time.MutableDateTime;
 
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
@@ -53,7 +53,7 @@ import org.apache.hadoop.io.Text;
 @NDV(maxNdv = 20) // although technically its unbounded, its unlikely we will 
ever see ndv > 20
 public class UDFYear extends UDF {
   private final SimpleDateFormat formatter = new 
SimpleDateFormat("-MM-dd");
-  private transient final MutableDateTime mdt = new MutableDateTime();
+  private final Calendar calendar = Calendar.getInstance();
 
   private final IntWritable result = new IntWritable();
 
@@ -77,8 +77,8 @@ public class UDFYear extends UDF {
 
 try {
   Date date = formatter.parse(dateString.toString());
-  mdt.setMillis(date.getTime());
-  result.set(mdt.getYear());
+  calendar.setTime(date);
+  result.set(calendar.get(Calendar.YEAR));
   return result;
 } catch (ParseException e) {
   return null;
@@ -90,8 +90,8 @@ public class UDFYear extends UDF {
   return null;
 }
 
-mdt.setMillis(d.get().getTime());
-result.set(mdt.getYear());
+calendar.setTime(d.get());
+result.set(calendar.get(Calendar.YEAR));
 

[32/50] [abbrv] hive git commit: HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 (Matt McCline, reviewed by Jason Dere)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateScalarBase.txt
--
diff --git 
a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateScalarBase.txt
 
b/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateScalarBase.txt
deleted file mode 100644
index c2ddd67..000
--- 
a/ql/src/gen/vectorization/ExpressionTemplates/TimestampColumnArithmeticDateScalarBase.txt
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
-
-import org.apache.hadoop.hive.common.type.PisaTimestamp;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
-import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil;
-import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
-import org.apache.hadoop.hive.ql.exec.vector.*;
-import org.apache.hadoop.hive.serde2.io.DateWritable;
-
-/**
- * Generated from template TimestampColumnArithmeticDateScalarBase.txt, which 
covers binary arithmetic
- * expressions between a column and a scalar.
- */
-public abstract class  extends VectorExpression {
-
-  private static final long serialVersionUID = 1L;
-
-  private int colNum;
-  private PisaTimestamp value;
-  private int outputColumn;
-  private PisaTimestamp scratchPisaTimestamp;
-
-  public (int colNum, long value, int outputColumn) {
-this.colNum = colNum;
-this.value = new 
PisaTimestamp().updateFromTimestampMilliseconds(DateWritable.daysToMillis((int) 
value));
-this.outputColumn = outputColumn;
-scratchPisaTimestamp = new PisaTimestamp();
-  }
-
-  public () {
-  }
-
-  @Override
-  public void evaluate(VectorizedRowBatch batch) {
-
-if (childExpressions != null) {
-  super.evaluateChildren(batch);
-}
-
-// Input #1 is type Timestamp (PisaTimestamp).
-TimestampColumnVector inputColVector1 = (TimestampColumnVector) 
batch.cols[colNum];
-
-// Output is type Timestamp.
-TimestampColumnVector outputColVector = (TimestampColumnVector) 
batch.cols[outputColumn];
-
-int[] sel = batch.selected;
-boolean[] inputIsNull = inputColVector1.isNull;
-boolean[] outputIsNull = outputColVector.isNull;
-outputColVector.noNulls = inputColVector1.noNulls;
-outputColVector.isRepeating = inputColVector1.isRepeating;
-int n = batch.size;
-
-// return immediately if batch is empty
-if (n == 0) {
-  return;
-}
-
-if (inputColVector1.isRepeating) {
-  outputColVector.(
-  inputColVector1.asScratchPisaTimestamp(0), value, 0);
-
-  // Even if there are no nulls, we always copy over entry 0. Simplifies 
code.
-  outputIsNull[0] = inputIsNull[0];
-} else if (inputColVector1.noNulls) {
-  if (batch.selectedInUse) {
-for(int j = 0; j != n; j++) {
-  int i = sel[j];
-  outputColVector.(
-inputColVector1.asScratchPisaTimestamp(i), value, i);
-}
-  } else {
-for(int i = 0; i != n; i++) {
-  outputColVector.(
-inputColVector1.asScratchPisaTimestamp(i), value, i);
-}
-  }
-} else /* there are nulls */ {
-  if (batch.selectedInUse) {
-for(int j = 0; j != n; j++) {
-  int i = sel[j];
-  outputColVector.(
-inputColVector1.asScratchPisaTimestamp(i), value, i);
-  outputIsNull[i] = inputIsNull[i];
-}
-  } else {
-for(int i = 0; i != n; i++) {
-  outputColVector.(
-inputColVector1.asScratchPisaTimestamp(i), value, i);
-}
-System.arraycopy(inputIsNull, 0, outputIsNull, 0, n);
-  }
-}
-
-NullUtil.setNullOutputEntriesColScalar(outputColVector, 
batch.selectedInUse, sel, n);
-  }
-
-  @Override
-  public int getOutputColumn() {
-return outputColumn;
-  }
-
-  @Override
-  public String getOutputType() {
-return "timestamp";
-  }
-}


[01/50] [abbrv] hive git commit: HIVE-13295: Improvement to LDAP search queries in HS2 LDAP Authenticator (Naveen Gangam via Chaoyu Tang)

2016-04-04 Thread jdere
Repository: hive
Updated Branches:
  refs/heads/llap 28d1082b4 -> a7b0ca733


HIVE-13295: Improvement to LDAP search queries in HS2 LDAP Authenticator 
(Naveen Gangam via Chaoyu Tang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e665f020
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e665f020
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e665f020

Branch: refs/heads/llap
Commit: e665f020b419cf9096006c45f4afcda13fa9e882
Parents: 55383d8
Author: ctang 
Authored: Thu Mar 24 09:34:59 2016 -0700
Committer: ctang 
Committed: Thu Mar 24 09:34:59 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   9 +
 .../auth/LdapAuthenticationProviderImpl.java| 317 ++-
 .../auth/TestLdapAtnProviderWithMiniDS.java | 200 +++-
 3 files changed, 373 insertions(+), 153 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/e665f020/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index b8b9dcf..b8870f2 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2237,6 +2237,15 @@ public class HiveConf extends Configuration {
 
HIVE_SERVER2_PLAIN_LDAP_USERFILTER("hive.server2.authentication.ldap.userFilter",
 null,
 "COMMA-separated list of LDAP usernames (just short names, not full 
DNs).\n" +
 "For example: hiveuser,impalauser,hiveadmin,hadoopadmin"),
+
HIVE_SERVER2_PLAIN_LDAP_GUIDKEY("hive.server2.authentication.ldap.guidKey", 
"uid",
+"LDAP attribute name whose values are unique in this LDAP server.\n" +
+"For example: uid or CN."),
+
HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY("hive.server2.authentication.ldap.groupMembershipKey",
 "member",
+"LDAP attribute name on the user entry that references a group, the 
user belongs to.\n" +
+"For example: member, uniqueMember or memberUid"),
+
HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY("hive.server2.authentication.ldap.groupClassKey",
 "groupOfNames",
+"LDAP attribute name on the group entry that is to be used in LDAP 
group searches.\n" +
+"For example: group, groupOfNames or groupOfUniqueNames."),
 
HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY("hive.server2.authentication.ldap.customLDAPQuery",
 null,
 "A full LDAP query that LDAP Atn provider uses to execute against LDAP 
Server.\n" +
 "If this query returns a null resultset, the LDAP Provider fails the 
Authentication\n" +
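
The three properties added above (guidKey, groupMembershipKey, groupClassKey) let the LDAP authenticator adapt its search filters to a directory's schema. A hedged sketch of setting them through a plain Hadoop Configuration; the values shown are illustrative, not recommendations.

import org.apache.hadoop.conf.Configuration;

public class LdapAtnConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Attribute that uniquely identifies a user entry (e.g. uid or CN).
    conf.set("hive.server2.authentication.ldap.guidKey", "uid");
    // Attribute used to resolve group membership (e.g. member, uniqueMember, memberUid).
    conf.set("hive.server2.authentication.ldap.groupMembershipKey", "member");
    // Object class value used when searching for group entries (e.g. groupOfNames).
    conf.set("hive.server2.authentication.ldap.groupClassKey", "groupOfNames");

    System.out.println(conf.get("hive.server2.authentication.ldap.guidKey"));
  }
}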

http://git-wip-us.apache.org/repos/asf/hive/blob/e665f020/service/src/java/org/apache/hive/service/auth/LdapAuthenticationProviderImpl.java
--
diff --git 
a/service/src/java/org/apache/hive/service/auth/LdapAuthenticationProviderImpl.java
 
b/service/src/java/org/apache/hive/service/auth/LdapAuthenticationProviderImpl.java
index 9b0b14d..8f64672 100644
--- 
a/service/src/java/org/apache/hive/service/auth/LdapAuthenticationProviderImpl.java
+++ 
b/service/src/java/org/apache/hive/service/auth/LdapAuthenticationProviderImpl.java
@@ -41,7 +41,6 @@ import org.slf4j.LoggerFactory;
 public class LdapAuthenticationProviderImpl implements 
PasswdAuthenticationProvider {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(LdapAuthenticationProviderImpl.class);
-  private static final String DN_ATTR = "distinguishedName";
 
   private String ldapURL;
   private String baseDN;
@@ -51,6 +50,9 @@ public class LdapAuthenticationProviderImpl implements 
PasswdAuthenticationProvi
   private static List userFilter;
   private static List groupFilter;
   private String customQuery;
+  private static String guid_attr;
+  private static String groupMembership_attr;
+  private static String groupClass_attr;
 
   LdapAuthenticationProviderImpl(HiveConf conf) {
 init(conf);
@@ -61,65 +63,66 @@ public class LdapAuthenticationProviderImpl implements 
PasswdAuthenticationProvi
 baseDN  = 
conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_BASEDN);
 ldapDomain  = 
conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_DOMAIN);
 customQuery = 
conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY);
-
-if (customQuery == null) {
-  groupBases = new ArrayList();
-  userBases  = new ArrayList();
-  String groupDNPatterns = 
conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN);
-  String groupFilterVal  = 

[13/50] [abbrv] hive git commit: HIVE-13307: LLAP: Slider package should contain permanent functions (addendum)

2016-04-04 Thread jdere
HIVE-13307: LLAP: Slider package should contain permanent functions (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b1c45029
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b1c45029
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b1c45029

Branch: refs/heads/llap
Commit: b1c45029ed3652eda9db6650da38ba653d4ada93
Parents: 4fabd03
Author: Gopal V 
Authored: Fri Mar 25 00:18:44 2016 -0700
Committer: Gopal V 
Committed: Fri Mar 25 00:19:35 2016 -0700

--
 ql/src/test/queries/clientpositive/llap_udf.q | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b1c45029/ql/src/test/queries/clientpositive/llap_udf.q
--
diff --git a/ql/src/test/queries/clientpositive/llap_udf.q 
b/ql/src/test/queries/clientpositive/llap_udf.q
index c964f2b..2224bd5 100644
--- a/ql/src/test/queries/clientpositive/llap_udf.q
+++ b/ql/src/test/queries/clientpositive/llap_udf.q
@@ -3,7 +3,7 @@ set hive.explain.user=false;
 set hive.execution.mode=llap;
 set hive.llap.execution.mode=all;
 set hive.fetch.task.conversion=none;
-set hive.llap.daemon.allow.permanent.fns=true;
+set hive.llap.allow.permanent.fns=true;
 
 drop table if exists src_orc;
 create table src_orc stored as orc as select * from src;
@@ -37,11 +37,11 @@ DROP FUNCTION test_udf4;
 EXPLAIN
 SELECT test_udf0(cast(key as string)) from src_orc;
 
-set hive.llap.daemon.allow.permanent.fns=false;
+set hive.llap.allow.permanent.fns=false;
 
 EXPLAIN
 SELECT test_udf3(cast(key as string)) from src_orc;
 
 
 drop table if exists src_orc;
-set hive.execution.mode=container;
\ No newline at end of file
+set hive.execution.mode=container;



[27/50] [abbrv] hive git commit: HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 (Matt McCline, reviewed by Jason Dere)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/ql/src/test/results/clientpositive/tez/vector_interval_arithmetic.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/tez/vector_interval_arithmetic.q.out 
b/ql/src/test/results/clientpositive/tez/vector_interval_arithmetic.q.out
new file mode 100644
index 000..8409a01
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_interval_arithmetic.q.out
@@ -0,0 +1,1086 @@
+PREHOOK: query: create table unique_timestamps (tsval timestamp) STORED AS 
TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@unique_timestamps
+POSTHOOK: query: create table unique_timestamps (tsval timestamp) STORED AS 
TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@unique_timestamps
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/timestamps.txt' 
OVERWRITE INTO TABLE unique_timestamps
+PREHOOK: type: LOAD
+ A masked pattern was here 
+PREHOOK: Output: default@unique_timestamps
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/timestamps.txt' 
OVERWRITE INTO TABLE unique_timestamps
+POSTHOOK: type: LOAD
+ A masked pattern was here 
+POSTHOOK: Output: default@unique_timestamps
+PREHOOK: query: create table interval_arithmetic_1 (dateval date, tsval 
timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@interval_arithmetic_1
+POSTHOOK: query: create table interval_arithmetic_1 (dateval date, tsval 
timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@interval_arithmetic_1
+PREHOOK: query: insert overwrite table interval_arithmetic_1
+  select cast(tsval as date), tsval from unique_timestamps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@unique_timestamps
+PREHOOK: Output: default@interval_arithmetic_1
+POSTHOOK: query: insert overwrite table interval_arithmetic_1
+  select cast(tsval as date), tsval from unique_timestamps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@unique_timestamps
+POSTHOOK: Output: default@interval_arithmetic_1
+POSTHOOK: Lineage: interval_arithmetic_1.dateval EXPRESSION 
[(unique_timestamps)unique_timestamps.FieldSchema(name:tsval, type:timestamp, 
comment:null), ]
+POSTHOOK: Lineage: interval_arithmetic_1.tsval SIMPLE 
[(unique_timestamps)unique_timestamps.FieldSchema(name:tsval, type:timestamp, 
comment:null), ]
+_c0tsval
+PREHOOK: query: -- interval year-month arithmetic
+explain
+select
+  dateval,
+  dateval - interval '2-2' year to month,
+  dateval - interval '-2-2' year to month,
+  dateval + interval '2-2' year to month,
+  dateval + interval '-2-2' year to month,
+  - interval '2-2' year to month + dateval,
+  interval '2-2' year to month + dateval
+from interval_arithmetic_1
+order by dateval
+PREHOOK: type: QUERY
+POSTHOOK: query: -- interval year-month arithmetic
+explain
+select
+  dateval,
+  dateval - interval '2-2' year to month,
+  dateval - interval '-2-2' year to month,
+  dateval + interval '2-2' year to month,
+  dateval + interval '-2-2' year to month,
+  - interval '2-2' year to month + dateval,
+  interval '2-2' year to month + dateval
+from interval_arithmetic_1
+order by dateval
+POSTHOOK: type: QUERY
+Explain
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+Tez
+ A masked pattern was here 
+  Edges:
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+ A masked pattern was here 
+  Vertices:
+Map 1 
+Map Operator Tree:
+TableScan
+  alias: interval_arithmetic_1
+  Statistics: Num rows: 50 Data size: 4800 Basic stats: 
COMPLETE Column stats: NONE
+  Select Operator
+expressions: dateval (type: date), (dateval - 2-2) (type: 
date), (dateval - -2-2) (type: date), (dateval + 2-2) (type: date), (dateval + 
-2-2) (type: date), (-2-2 + dateval) (type: date), (2-2 + dateval) (type: date)
+outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6
+Statistics: Num rows: 50 Data size: 4800 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  key expressions: _col0 (type: date)
+  sort order: +
+  Statistics: Num rows: 50 Data size: 4800 Basic stats: 
COMPLETE Column stats: NONE
+  value expressions: _col1 (type: date), _col2 (type: 
date), _col3 (type: date), _col4 (type: date), _col5 (type: date), _col6 (type: 
date)
+Execution mode: vectorized
+Reducer 2 
+Execution mode: vectorized
+Reduce Operator Tree:
+  Select Operator
+expressions: KEY.reducesinkkey0 (type: 

[23/50] [abbrv] hive git commit: HIVE-12992: Hive on tez: Bucket map join plan is incorrect (Vikram Dixit K, reviewed by Jason Dere)

2016-04-04 Thread jdere
HIVE-12992: Hive on tez: Bucket map join plan is incorrect (Vikram Dixit K, 
reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/761b5471
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/761b5471
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/761b5471

Branch: refs/heads/llap
Commit: 761b5471a0a38ee35a715ea2d4e6d268d5a9
Parents: 7747458
Author: vikram 
Authored: Mon Mar 28 11:25:11 2016 -0700
Committer: vikram 
Committed: Mon Mar 28 11:37:32 2016 -0700

--
 .../hadoop/hive/ql/exec/OperatorUtils.java  |  45 ++-
 .../ql/optimizer/ReduceSinkMapJoinProc.java |  24 +-
 .../clientpositive/bucket_map_join_tez1.q   |  27 ++
 .../llap/bucket_map_join_tez1.q.out | 308 +++
 .../spark/bucket_map_join_tez1.q.out| 306 ++
 .../tez/bucket_map_join_tez1.q.out  | 294 ++
 6 files changed, 985 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/761b5471/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
index 3d664c1..41507b1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
@@ -26,6 +26,7 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hive.ql.exec.NodeUtils.Function;
+import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.slf4j.Logger;
@@ -80,6 +81,11 @@ public class OperatorUtils {
 return found.size() == 1 ? found.iterator().next() : null;
   }
 
+  public static  T findSingleOperatorUpstreamJoinAccounted(Operator 
start, Class clazz) {
+Set found = findOperatorsUpstreamJoinAccounted(start, clazz, new 
HashSet());
+return found.size() == 1 ? found.iterator().next(): null;
+  }
+
   public static  Set findOperatorsUpstream(Collection 
starts, Class clazz) {
 Set found = new HashSet();
 for (Operator start : starts) {
@@ -101,6 +107,34 @@ public class OperatorUtils {
 return found;
   }
 
+  public static  Set findOperatorsUpstreamJoinAccounted(Operator 
start, Class clazz,
+  Set found) {
+if (clazz.isInstance(start)) {
+  found.add((T) start);
+}
+int onlyIncludeIndex = -1;
+if (start instanceof AbstractMapJoinOperator) {
+  AbstractMapJoinOperator mapJoinOp = (AbstractMapJoinOperator) start;
+  MapJoinDesc desc = (MapJoinDesc) mapJoinOp.getConf();
+  onlyIncludeIndex = desc.getPosBigTable();
+}
+if (start.getParentOperators() != null) {
+  int i = 0;
+  for (Operator parent : start.getParentOperators()) {
+if (onlyIncludeIndex >= 0) {
+  if (onlyIncludeIndex == i) {
+findOperatorsUpstream(parent, clazz, found);
+  }
+} else {
+  findOperatorsUpstream(parent, clazz, found);
+}
+i++;
+  }
+}
+return found;
+  }
+
+
   public static void setChildrenCollector(List childOperators, OutputCollector out) {
 if (childOperators == null) {
   return;
@@ -202,7 +236,7 @@ public class OperatorUtils {
   }
 
   public static boolean sameRowSchema(Operator operator1, Operator 
operator2) {
-   return operator1.getSchema().equals(operator2.getSchema());
+return operator1.getSchema().equals(operator2.getSchema());
   }
 
   /**
@@ -220,9 +254,9 @@ public class OperatorUtils {
* them
*/
   public static Multimap, Operator> 
classifyOperators(
-Operator start, Set> classes) {
+  Operator start, Set> classes) {
 ImmutableMultimap.Builder, Operator> 
resultMap =
-  new ImmutableMultimap.Builder, 
Operator>();
+new ImmutableMultimap.Builder, 
Operator>();
 List ops = new ArrayList();
 ops.add(start);
 while (!ops.isEmpty()) {
@@ -255,9 +289,9 @@ public class OperatorUtils {
* them
*/
   public static Multimap, Operator> 
classifyOperatorsUpstream(
-Operator start, Set> classes) {
+  Operator start, Set> classes) {
 ImmutableMultimap.Builder, Operator> 
resultMap =
-  new ImmutableMultimap.Builder, 
Operator>();
+new ImmutableMultimap.Builder, 
Operator>();
 List ops = new ArrayList();
 ops.add(start);
 while (!ops.isEmpty()) {
@@ -296,5 +330,4 @@ public class 

[45/50] [abbrv] hive git commit: HIVE-13361: Orc concatenation should enforce the compression buffer size (Prasanth Jayachandran reviewed by Gopal V)

2016-04-04 Thread jdere
HIVE-13361: Orc concatenation should enforce the compression buffer size 
(Prasanth Jayachandran reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8c1f055d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8c1f055d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8c1f055d

Branch: refs/heads/llap
Commit: 8c1f055d91d5dd50544e7d39f2dc1ad087ac78e2
Parents: b431c27
Author: Prasanth Jayachandran 
Authored: Tue Mar 29 22:20:20 2016 -0700
Committer: Prasanth Jayachandran 
Committed: Tue Mar 29 22:20:20 2016 -0700

--
 .../test/resources/testconfiguration.properties |   1 +
 orc/src/java/org/apache/orc/OrcFile.java|  21 +-
 .../java/org/apache/orc/impl/WriterImpl.java|  18 +-
 .../hive/ql/exec/OrcFileMergeOperator.java  |   4 +-
 .../test/queries/clientpositive/orc_merge12.q   |  51 ++
 .../results/clientpositive/orc_merge12.q.out| 606 +++
 .../clientpositive/tez/orc_merge12.q.out| 606 +++
 7 files changed, 1295 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8c1f055d/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index ed26dea..232f84e 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -175,6 +175,7 @@ minitez.query.files.shared=acid_globallimit.q,\
   orc_merge9.q,\
   orc_merge10.q,\
   orc_merge11.q,\
+  orc_merge12.q,\
   orc_merge_incompat1.q,\
   orc_merge_incompat2.q,\
   orc_merge_incompat3.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/8c1f055d/orc/src/java/org/apache/orc/OrcFile.java
--
diff --git a/orc/src/java/org/apache/orc/OrcFile.java 
b/orc/src/java/org/apache/orc/OrcFile.java
index 3945a5d..85506ff 100644
--- a/orc/src/java/org/apache/orc/OrcFile.java
+++ b/orc/src/java/org/apache/orc/OrcFile.java
@@ -232,6 +232,7 @@ public class OrcFile {
 private long blockSizeValue;
 private int rowIndexStrideValue;
 private int bufferSizeValue;
+private boolean enforceBufferSize = false;
 private boolean blockPaddingValue;
 private CompressionKind compressValue;
 private MemoryManager memoryManagerValue;
@@ -317,7 +318,10 @@ public class OrcFile {
 
 /**
  * The size of the memory buffers used for compressing and storing the
- * stripe in memory.
+ * stripe in memory. NOTE: ORC writer may choose to use smaller buffer
+ * size based on stripe size and number of columns for efficient stripe
+ * writing and memory utilization. To enforce writer to use the requested
+ * buffer size use enforceBufferSize().
  */
 public WriterOptions bufferSize(int value) {
   bufferSizeValue = value;
@@ -325,6 +329,17 @@ public class OrcFile {
 }
 
 /**
+ * Enforce writer to use requested buffer size instead of estimating
+ * buffer size based on stripe size and number of columns.
+ * See bufferSize() method for more info.
+ * Default: false
+ */
+public WriterOptions enforceBufferSize() {
+  enforceBufferSize = true;
+  return this;
+}
+
+/**
  * Sets whether the HDFS blocks are padded to prevent stripes from
  * straddling blocks. Padding improves locality and thus the speed of
  * reading, but costs space.
@@ -460,6 +475,10 @@ public class OrcFile {
   return bufferSizeValue;
 }
 
+public boolean isEnforceBufferSize() {
+  return enforceBufferSize;
+}
+
 public int getRowIndexStride() {
   return rowIndexStrideValue;
 }
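
As the updated javadoc notes, the writer may silently shrink the requested compression buffer unless the new enforceBufferSize() option is set. A small usage sketch, assuming the org.apache.orc writer API on this branch; the output path and schema are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;

public class EnforceBufferSizeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    TypeDescription schema = TypeDescription.fromString("struct<id:int,name:string>");

    // Without enforceBufferSize() the 256KB request may be reduced based on
    // stripe size and column count; with it, the requested size is kept.
    Writer writer = OrcFile.createWriter(new Path("/tmp/enforced.orc"),
        OrcFile.writerOptions(conf)
            .setSchema(schema)
            .bufferSize(256 * 1024)
            .enforceBufferSize());
    writer.close();
  }
}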

http://git-wip-us.apache.org/repos/asf/hive/blob/8c1f055d/orc/src/java/org/apache/orc/impl/WriterImpl.java
--
diff --git a/orc/src/java/org/apache/orc/impl/WriterImpl.java 
b/orc/src/java/org/apache/orc/impl/WriterImpl.java
index d4b9a14..f8afe06 100644
--- a/orc/src/java/org/apache/orc/impl/WriterImpl.java
+++ b/orc/src/java/org/apache/orc/impl/WriterImpl.java
@@ -180,8 +180,12 @@ public class WriterImpl implements Writer, 
MemoryManager.Callback {
 buildIndex = rowIndexStride > 0;
 codec = createCodec(compress);
 int numColumns = schema.getMaximumId() + 1;
-this.bufferSize = getEstimatedBufferSize(defaultStripeSize,
-numColumns, opts.getBufferSize());
+if (opts.isEnforceBufferSize()) {
+  this.bufferSize = opts.getBufferSize();
+} else {
+  this.bufferSize = getEstimatedBufferSize(defaultStripeSize,
+  

[12/50] [abbrv] hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)

2016-04-04 Thread jdere
HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions, 
ADDENDUM (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4fabd038
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4fabd038
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4fabd038

Branch: refs/heads/llap
Commit: 4fabd038cf64b906a89726805958c43b97194291
Parents: 6bfec2e
Author: Wei Zheng 
Authored: Thu Mar 24 22:18:32 2016 -0700
Committer: Wei Zheng 
Committed: Thu Mar 24 22:18:32 2016 -0700

--
 .../java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java   | 5 +++--
 .../java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java | 4 ++--
 ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java | 5 +++--
 3 files changed, 8 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
index 4c31a49..23b1b7f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
@@ -275,8 +275,9 @@ public class Cleaner extends CompactorThread {
 try {
   FileSystem.closeAllForUGI(ugi);
 } catch (IOException exception) {
-  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception + " for " +
-  ci.getFullPartitionName());}
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi + 
" for " +
+  ci.getFullPartitionName(), exception);
+}
   }
   txnHandler.markCleaned(ci);
 } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
index 98ebf53..abbe5d4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
@@ -235,8 +235,8 @@ public class Initiator extends CompactorThread {
   try {
 FileSystem.closeAllForUGI(ugi);
   } catch (IOException exception) {
-LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception + " for " +
-ci.getFullPartitionName());
+LOG.error("Could not clean up file-system handles for UGI: " + ugi + " 
for " +
+ci.getFullPartitionName(), exception);
   }
   return compactionType;
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/4fabd038/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
index e21ca27..6238e2b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
@@ -175,8 +175,9 @@ public class Worker extends CompactorThread {
 try {
   FileSystem.closeAllForUGI(ugi);
 } catch (IOException exception) {
-  LOG.error("Could not clean up file-system handles for UGI: " + 
ugi, exception + " for " +
-  ci.getFullPartitionName());}
+  LOG.error("Could not clean up file-system handles for UGI: " + 
ugi + " for " +
+  ci.getFullPartitionName(), exception);
+}
   }
   txnHandler.markCompacted(ci);
 } catch (Exception e) {
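
The addendum above reorders the LOG.error arguments so the caught IOException is passed as the final parameter instead of being concatenated into the message; SLF4J only prints the stack trace when the Throwable is the last argument. A minimal illustration of the difference:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

  public static void main(String[] args) {
    Exception cause = new java.io.IOException("disk full");
    // Stack trace is lost: the exception is stringified into the message.
    LOG.error("Could not clean up handles: " + cause + " for part=p1");
    // Stack trace is kept: the Throwable is the last argument.
    LOG.error("Could not clean up handles for part=p1", cause);
  }
}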



[14/50] [abbrv] hive git commit: HIVE-12531 : Implement fast-path for Year/Month UDFs for dates between 1999 and 2038 (Jason Dere via Sergey Shelukhin)

2016-04-04 Thread jdere
HIVE-12531 : Implement fast-path for Year/Month UDFs for dates between 1999 and 
2038 (Jason Dere via Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e384b2b6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e384b2b6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e384b2b6

Branch: refs/heads/llap
Commit: e384b2b657c819d5963b8f76222f78bb479a29a2
Parents: b75d9ea
Author: Jason Dere 
Authored: Wed Dec 9 11:48:00 2015 -0800
Committer: Ashutosh Chauhan 
Committed: Fri Mar 25 07:21:55 2016 -0700

--
 .../org/apache/hadoop/hive/ql/udf/UDFMonth.java | 16 
 .../java/org/apache/hadoop/hive/ql/udf/UDFYear.java | 16 
 .../expressions/TestVectorDateExpressions.java  | 13 ++---
 3 files changed, 26 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/e384b2b6/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java
index 8c2b0e4..05afb8e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hive.ql.udf;
 
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
-import java.util.Calendar;
 import java.util.Date;
+import org.joda.time.MutableDateTime;
 
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
@@ -53,7 +53,7 @@ import org.apache.hadoop.io.Text;
 @NDV(maxNdv = 31)
 public class UDFMonth extends UDF {
   private final SimpleDateFormat formatter = new 
SimpleDateFormat("-MM-dd");
-  private final Calendar calendar = Calendar.getInstance();
+  private transient final MutableDateTime mdt = new MutableDateTime();
 
   private final IntWritable result = new IntWritable();
 
@@ -75,8 +75,8 @@ public class UDFMonth extends UDF {
 }
 try {
   Date date = formatter.parse(dateString.toString());
-  calendar.setTime(date);
-  result.set(1 + calendar.get(Calendar.MONTH));
+  mdt.setMillis(date.getTime());
+  result.set(mdt.getMonthOfYear());
   return result;
 } catch (ParseException e) {
   return null;
@@ -88,8 +88,8 @@ public class UDFMonth extends UDF {
   return null;
 }
 
-calendar.setTime(d.get());
-result.set(1 + calendar.get(Calendar.MONTH));
+mdt.setMillis(d.get().getTime());
+result.set(mdt.getMonthOfYear());
 return result;
   }
 
@@ -98,8 +98,8 @@ public class UDFMonth extends UDF {
   return null;
 }
 
-calendar.setTime(t.getTimestamp());
-result.set(1 + calendar.get(Calendar.MONTH));
+mdt.setMillis(t.getTimestamp().getTime());
+result.set(mdt.getMonthOfYear());
 return result;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e384b2b6/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java
index d7ecd8c..fb3a655 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hive.ql.udf;
 
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
-import java.util.Calendar;
 import java.util.Date;
+import org.joda.time.MutableDateTime;
 
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
@@ -53,7 +53,7 @@ import org.apache.hadoop.io.Text;
 @NDV(maxNdv = 20) // although technically its unbounded, its unlikely we will 
ever see ndv > 20
 public class UDFYear extends UDF {
   private final SimpleDateFormat formatter = new 
SimpleDateFormat("-MM-dd");
-  private final Calendar calendar = Calendar.getInstance();
+  private transient final MutableDateTime mdt = new MutableDateTime();
 
   private final IntWritable result = new IntWritable();
 
@@ -77,8 +77,8 @@ public class UDFYear extends UDF {
 
 try {
   Date date = formatter.parse(dateString.toString());
-  calendar.setTime(date);
-  result.set(calendar.get(Calendar.YEAR));
+  mdt.setMillis(date.getTime());
+  result.set(mdt.getYear());
   return result;
 } catch (ParseException e) {
   return null;
@@ -90,8 +90,8 @@ public class UDFYear extends UDF {
   return null;
 }
 
-calendar.setTime(d.get());
-result.set(calendar.get(Calendar.YEAR));
+mdt.setMillis(d.get().getTime());
+result.set(mdt.getYear());
 

[34/50] [abbrv] hive git commit: HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 (Matt McCline, reviewed by Jason Dere)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampColumn.txt
--
diff --git 
a/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampColumn.txt
 
b/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampColumn.txt
index 6241ee2..63cebaf 100644
--- 
a/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampColumn.txt
+++ 
b/ql/src/gen/vectorization/ExpressionTemplates/DateColumnArithmeticTimestampColumn.txt
@@ -18,28 +18,155 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
 
-import org.apache.hadoop.hive.common.type.PisaTimestamp;
+import java.sql.Timestamp;
+
+import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.NullUtil;
 import org.apache.hadoop.hive.ql.exec.vector.*;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.util.DateTimeMath;
+import org.apache.hadoop.hive.serde2.io.DateWritable;
 
 /**
- * Generated from template DateColumnArithmeticTimestampColumn.txt, which 
covers binary arithmetic
- * expressions between a date column and a timestamp column.
+ * Generated from template DateColumnArithmeticTimestampColumn.txt, a class
+ * which covers binary arithmetic expressions between a date column and 
timestamp column.
  */
-public class  extends  {
+public class  extends VectorExpression {
 
   private static final long serialVersionUID = 1L;
 
+  private int colNum1;
+  private int colNum2;
+  private int outputColumn;
+  private Timestamp scratchTimestamp1;
+  private DateTimeMath dtm = new DateTimeMath();
+
   public (int colNum1, int colNum2, int outputColumn) {
-super(colNum1, colNum2, outputColumn);
+this.colNum1 = colNum1;
+this.colNum2 = colNum2;
+this.outputColumn = outputColumn;
+scratchTimestamp1 = new Timestamp(0);
   }
 
   public () {
-super();
+  }
+
+  @Override
+  public void evaluate(VectorizedRowBatch batch) {
+
+if (childExpressions != null) {
+  super.evaluateChildren(batch);
+}
+
+// Input #1 is type Date (days).  For the math we convert it to a 
timestamp.
+LongColumnVector inputColVector1 = (LongColumnVector) batch.cols[colNum1];
+
+// Input #2 is type .
+ inputColVector2 = () 
batch.cols[colNum2];
+
+// Output is type .
+ outputColVector = () 
batch.cols[outputColumn];
+
+int[] sel = batch.selected;
+int n = batch.size;
+long[] vector1 = inputColVector1.vector;
+
+// return immediately if batch is empty
+if (n == 0) {
+  return;
+}
+
+outputColVector.isRepeating =
+ inputColVector1.isRepeating && inputColVector2.isRepeating
+  || inputColVector1.isRepeating && !inputColVector1.noNulls && 
inputColVector1.isNull[0]
+  || inputColVector2.isRepeating && !inputColVector2.noNulls && 
inputColVector2.isNull[0];
+
+// Handle nulls first
+NullUtil.propagateNullsColCol(
+  inputColVector1, inputColVector2, outputColVector, sel, n, 
batch.selectedInUse);
+
+/* Disregard nulls for processing. In other words,
+ * the arithmetic operation is performed even if one or
+ * more inputs are null. This is to improve speed by avoiding
+ * conditional checks in the inner loop.
+ */
+if (inputColVector1.isRepeating && inputColVector2.isRepeating) {
+  scratchTimestamp1.setTime(DateWritable.daysToMillis((int) vector1[0]));
+  dtm.(
+  scratchTimestamp1, inputColVector2.asScratch(0), 
outputColVector.getScratch());
+  outputColVector.setFromScratch(0);
+} else if (inputColVector1.isRepeating) {
+  scratchTimestamp1.setTime(DateWritable.daysToMillis((int) vector1[0]));
+  if (batch.selectedInUse) {
+for(int j = 0; j != n; j++) {
+  int i = sel[j];
+  dtm.(
+  scratchTimestamp1, 
inputColVector2.asScratch(i), 
outputColVector.getScratch());
+  outputColVector.setFromScratch(i);
+}
+  } else {
+for(int i = 0; i != n; i++) {
+  dtm.(
+  scratchTimestamp1, 
inputColVector2.asScratch(i), 
outputColVector.getScratch());
+  outputColVector.setFromScratch(i);
+}
+  }
+} else if (inputColVector2.isRepeating) {
+   value2 = 
inputColVector2.asScratch(0);
+  if (batch.selectedInUse) {
+for(int j = 0; j != n; j++) {
+  int i = sel[j];
+  scratchTimestamp1.setTime(DateWritable.daysToMillis((int) 
vector1[i]));
+  dtm.(
+  scratchTimestamp1, value2, 
outputColVector.getScratch());
+  outputColVector.setFromScratch(i);
+ }
+  } else {
+for(int i = 0; i != n; i++) {
+  

[15/50] [abbrv] hive git commit: HIVE-12552 : Wrong number of reducer estimation causing job to fail (Rajesh Balamohan via Gunther Hagleitner)

2016-04-04 Thread jdere
HIVE-12552 : Wrong number of reducer estimation causing job to fail (Rajesh 
Balamohan via Gunther Hagleitner)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b75d9ea8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b75d9ea8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b75d9ea8

Branch: refs/heads/llap
Commit: b75d9ea8a73f85d1420f8e3ba1e3b8f9b9acdc5e
Parents: b1c4502
Author: Rajesh Balamohan 
Authored: Wed Dec 9 11:48:00 2015 -0800
Committer: Ashutosh Chauhan 
Committed: Fri Mar 25 07:21:55 2016 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b75d9ea8/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
index 8eab3af..d5a2eca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
@@ -119,7 +119,8 @@ public class GenTezUtils {
 
   // max we allow tez to pick
   int maxPartition = (int) (reduceSink.getConf().getNumReducers() * 
maxPartitionFactor);
-  maxPartition = (maxPartition > maxReducers) ? maxReducers : maxPartition;
+  maxPartition = Math.max(1, (maxPartition > maxReducers) ? maxReducers :
+  maxPartition);
 
   reduceWork.setMinReduceTasks(minPartition);
   reduceWork.setMaxReduceTasks(maxPartition);
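
The one-line change above guards against a reducer estimate of zero: the value is still capped at maxReducers, but can no longer fall below one. A tiny sketch of the clamping; the numbers are illustrative only.

public class ReducerClampSketch {
  // Mirrors the fix above: cap at maxReducers but never drop below 1.
  static int clamp(int estimated, int maxReducers) {
    return Math.max(1, Math.min(estimated, maxReducers));
  }

  public static void main(String[] args) {
    System.out.println(clamp(0, 1009));    // 1  -- an estimate rounded down to 0 previously failed the job
    System.out.println(clamp(5000, 1009)); // 1009
    System.out.println(clamp(42, 1009));   // 42
  }
}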



[02/50] [abbrv] hive git commit: HIVE-13300 : Hive on spark throws exception for multi-insert with join (Szehon, reviewed by Xuefu and Chao Sun)

2016-04-04 Thread jdere
HIVE-13300 : Hive on spark throws exception for multi-insert with join (Szehon, 
reviewed by Xuefu and Chao Sun)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/219d3527
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/219d3527
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/219d3527

Branch: refs/heads/llap
Commit: 219d3527cfac09045f0ac247821746e7c95dcb8c
Parents: e665f02
Author: Szehon Ho 
Authored: Thu Mar 24 11:08:04 2016 -0700
Committer: Szehon Ho 
Committed: Thu Mar 24 11:09:10 2016 -0700

--
 .../test/resources/testconfiguration.properties |   1 +
 .../ql/exec/spark/SparkReduceRecordHandler.java |   2 +
 .../clientpositive/multi_insert_with_join.q |  29 +
 .../clientpositive/multi_insert_with_join.q.out | 128 +++
 .../spark/multi_insert_with_join.q.out  | 128 +++
 5 files changed, 288 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/219d3527/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 232e262..f8e8bda 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -1006,6 +1006,7 @@ spark.query.files=add_part_multiple.q, \
   multi_insert_lateral_view.q, \
   multi_insert_mixed.q, \
   multi_insert_move_tasks_share_dependencies.q, \
+  multi_insert_with_join.q, \
   multi_join_union.q, \
   multi_join_union_src.q, \
   multigroupby_singlemr.q, \

http://git-wip-us.apache.org/repos/asf/hive/blob/219d3527/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
index 439e0df..0d31e5f 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java
@@ -230,8 +230,10 @@ public class SparkReduceRecordHandler extends 
SparkRecordHandler {
   if (isTagged) {
 // remove the tag from key coming out of reducer
 // and store it in separate variable.
+// make a copy for multi-insert with join case as Spark re-uses input 
key from same parent
 int size = keyWritable.getSize() - 1;
 tag = keyWritable.get()[size];
+keyWritable = new BytesWritable(keyWritable.getBytes(), size);
 keyWritable.setSize(size);
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/219d3527/ql/src/test/queries/clientpositive/multi_insert_with_join.q
--
diff --git a/ql/src/test/queries/clientpositive/multi_insert_with_join.q 
b/ql/src/test/queries/clientpositive/multi_insert_with_join.q
new file mode 100644
index 000..862dd9f
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/multi_insert_with_join.q
@@ -0,0 +1,29 @@
+set hive.auto.convert.join=false;
+
+drop table if exists status_updates;
+drop table if exists profiles;
+drop table if exists school_summary;
+drop table if exists gender_summary;
+
+create table status_updates(userid int,status string,ds string);
+create table profiles(userid int,school string,gender int);
+create table school_summary(school string,cnt int) partitioned by (ds string);
+create table gender_summary(gender int, cnt int) partitioned by (ds string);
+
+insert into status_updates values (1, "status_1", "2009-03-20");
+insert into profiles values (1, "school_1", 0);
+
+FROM (SELECT a.status, b.school, b.gender
+FROM status_updates a JOIN profiles b
+ON (a.userid = b.userid and
+a.ds='2009-03-20' )
+) subq1
+INSERT OVERWRITE TABLE gender_summary
+PARTITION(ds='2009-03-20')
+SELECT subq1.gender, COUNT(1) GROUP BY subq1.gender
+INSERT OVERWRITE TABLE school_summary
+PARTITION(ds='2009-03-20')
+SELECT subq1.school, COUNT(1) GROUP BY subq1.school;
+
+select * from school_summary;
+select * from gender_summary;
\ No newline at end of file
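
As an aside, a small self-contained sketch of the hazard behind the one-line copy in SparkReduceRecordHandler above (plain Hadoop BytesWritable API; the byte values are hypothetical): truncating a writable that another consumer still references mutates it for that consumer too, whereas constructing a fresh BytesWritable over the key bytes, as the patch does, leaves the shared instance untouched.

import org.apache.hadoop.io.BytesWritable;

public class SharedKeyCopySketch {
  public static void main(String[] args) {
    // A tagged key as the reducer sees it: payload bytes followed by a 1-byte tag.
    byte[] tagged = new byte[] {'k', 'e', 'y', 7};
    BytesWritable shared = new BytesWritable(tagged);

    // Unsafe: shared.setSize(shared.getLength() - 1) would shrink the key for
    // every holder of this reference -- the multi-insert-with-join symptom.

    // Safe (the approach in the patch): wrap the payload in a new writable and
    // truncate that one, leaving the shared object's length untouched.
    int size = shared.getLength() - 1;
    BytesWritable copy = new BytesWritable(shared.getBytes(), size);
    copy.setSize(size);

    System.out.println("shared length: " + shared.getLength()); // still 4
    System.out.println("copy length: " + copy.getLength());     // 3
  }
}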

http://git-wip-us.apache.org/repos/asf/hive/blob/219d3527/ql/src/test/results/clientpositive/multi_insert_with_join.q.out
--
diff --git a/ql/src/test/results/clientpositive/multi_insert_with_join.q.out 
b/ql/src/test/results/clientpositive/multi_insert_with_join.q.out
new file mode 100644
index 000..28bce84
--- /dev/null
+++ b/ql/src/test/results/clientpositive/multi_insert_with_join.q.out
@@ -0,0 

[19/50] [abbrv] hive git commit: HIVE-12960: Migrate Column Stats Extrapolation and UniformDistribution to HBaseStore (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2016-04-04 Thread jdere
HIVE-12960: Migrate Column Stats Extrapolation and UniformDistribution to 
HBaseStore (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/96862093
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/96862093
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/96862093

Branch: refs/heads/llap
Commit: 968620932301dc64cd435292726943a6c0a42551
Parents: 3038b05
Author: Pengcheng Xiong 
Authored: Sun Mar 27 11:46:17 2016 -0700
Committer: Pengcheng Xiong 
Committed: Sun Mar 27 12:11:39 2016 -0700

--
 .../hive/metastore/StatObjectConverter.java |   2 +-
 .../hadoop/hive/metastore/hbase/HBaseUtils.java |   8 +-
 .../hadoop/hive/metastore/hbase/StatsCache.java |  20 +-
 .../stats/BinaryColumnStatsAggregator.java  |  43 +-
 .../stats/BooleanColumnStatsAggregator.java |  42 +-
 .../hbase/stats/ColumnStatsAggregator.java  |  12 +-
 .../stats/ColumnStatsAggregatorFactory.java |   8 +-
 .../stats/DecimalColumnStatsAggregator.java | 340 -
 .../stats/DoubleColumnStatsAggregator.java  | 307 +++-
 .../hbase/stats/IExtrapolatePartStatus.java |  30 +
 .../hbase/stats/LongColumnStatsAggregator.java  | 305 +++-
 .../stats/StringColumnStatsAggregator.java  |  85 ++-
 ...stHBaseAggregateStatsCacheWithBitVector.java |   6 +-
 .../TestHBaseAggregateStatsExtrapolation.java   | 717 +++
 .../TestHBaseAggregateStatsNDVUniformDist.java  | 581 +++
 .../clientpositive/tez/explainuser_1.q.out  |  92 +--
 16 files changed, 2454 insertions(+), 144 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/96862093/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
index b3ceff1..e119dd8 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
@@ -650,7 +650,7 @@ public class StatObjectConverter {
 }
   }
 
-  private static Decimal createThriftDecimal(String s) {
+  public static Decimal createThriftDecimal(String s) {
 BigDecimal d = new BigDecimal(s);
 return new Decimal(ByteBuffer.wrap(d.unscaledValue().toByteArray()), 
(short)d.scale());
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/96862093/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
index 9ec7cd5..e0b449b 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.hive.metastore.hbase;
 
 import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
 import java.nio.charset.Charset;
 import java.nio.charset.StandardCharsets;
 import java.security.MessageDigest;
@@ -88,7 +90,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
 /**
  * Utility functions
  */
-class HBaseUtils {
+public class HBaseUtils {
 
   final static Charset ENCODING = StandardCharsets.UTF_8;
   final static char KEY_SEPARATOR = '\u0001';
@@ -1421,4 +1423,8 @@ class HBaseUtils {
 b[7] = (byte)(v >>>  0);
 return b;
   }
+
+  public static double getDoubleValue(Decimal decimal) {
+return new BigDecimal(new BigInteger(decimal.getUnscaled()), 
decimal.getScale()).doubleValue();
+  }
 }
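
For readers who do not have the Thrift Decimal class handy, the new HBaseUtils.getDoubleValue above boils down to this java.math-only conversion; the sketch below takes the decimal apart into its unscaled two's-complement bytes and scale, which is all the helper uses, and the sample value is hypothetical.

import java.math.BigDecimal;
import java.math.BigInteger;

public class DecimalToDoubleSketch {
  // unscaled two's-complement bytes + scale -> double, as in getDoubleValue.
  static double toDouble(byte[] unscaledBytes, int scale) {
    return new BigDecimal(new BigInteger(unscaledBytes), scale).doubleValue();
  }

  public static void main(String[] args) {
    // 12345 with scale 2 represents 123.45
    byte[] unscaled = BigInteger.valueOf(12345).toByteArray();
    System.out.println(toDouble(unscaled, 2)); // 123.45
  }
}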

http://git-wip-us.apache.org/repos/asf/hive/blob/96862093/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java
index f1d2e50..18f8afc 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java
@@ -85,12 +85,12 @@ class StatsCache {
   @Override
   public AggrStats load(StatsCacheKey key) throws Exception {
 int numBitVectors = 
HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf);
+boolean useDensityFunctionForNDVEstimation = 
HiveConf.getBoolVar(conf, 

[44/50] [abbrv] hive git commit: HIVE-12619: Switching the field order within an array of structs causes the query to fail (Mohammad and Jimmy, reviewed by Sergio)

2016-04-04 Thread jdere
HIVE-12619: Switching the field order within an array of structs causes the 
query to fail (Mohammad and Jimmy, reviewed by Sergio)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b431c278
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b431c278
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b431c278

Branch: refs/heads/llap
Commit: b431c2788cd37fc788acd48beaf31c403361c1f0
Parents: 20a8192
Author: Jimmy Xiang 
Authored: Thu Mar 10 10:32:57 2016 -0800
Committer: Jimmy Xiang 
Committed: Tue Mar 29 19:47:18 2016 -0700

--
 .../io/parquet/convert/HiveSchemaConverter.java | 10 +--
 .../parquet/read/DataWritableReadSupport.java   | 75 
 .../ql/io/parquet/serde/ParquetHiveSerDe.java   | 11 +--
 .../clientpositive/parquet_schema_evolution.q   | 14 
 .../parquet_map_null.q.java1.8.out  |  1 +
 .../parquet_schema_evolution.q.out  | 65 +
 .../clientpositive/parquet_type_promotion.q.out |  2 +-
 7 files changed, 131 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b431c278/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java
index b01f21f..40f6256 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java
@@ -24,12 +24,10 @@ import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-
 import org.apache.parquet.schema.ConversionPatterns;
 import org.apache.parquet.schema.GroupType;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.OriginalType;
-import org.apache.parquet.schema.PrimitiveType;
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
 import org.apache.parquet.schema.Type;
 import org.apache.parquet.schema.Type.Repetition;
@@ -120,9 +118,10 @@ public class HiveSchemaConverter {
 
   // An optional group containing a repeated anonymous group "bag", containing
   // 1 anonymous element "array_element"
+  @SuppressWarnings("deprecation")
   private static GroupType convertArrayType(final String name, final 
ListTypeInfo typeInfo) {
 final TypeInfo subType = typeInfo.getListElementTypeInfo();
-return listWrapper(name, OriginalType.LIST, new 
GroupType(Repetition.REPEATED,
+return new GroupType(Repetition.OPTIONAL, name, OriginalType.LIST, new 
GroupType(Repetition.REPEATED,
 ParquetHiveSerDe.ARRAY.toString(), convertType("array_element", 
subType)));
   }
 
@@ -143,9 +142,4 @@ public class HiveSchemaConverter {
 typeInfo.getMapValueTypeInfo());
 return ConversionPatterns.mapType(Repetition.OPTIONAL, name, keyType, 
valueType);
   }
-
-  private static GroupType listWrapper(final String name, final OriginalType 
originalType,
-  final GroupType groupType) {
-return new GroupType(Repetition.OPTIONAL, name, originalType, groupType);
-  }
 }
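
For readers unfamiliar with the Parquet schema API, a small hypothetical sketch of the shape convertArrayType now builds directly: an OPTIONAL group annotated as LIST wrapping the REPEATED "bag" group. The int32 element type and the names below are illustrative only, not taken from the patch.

import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.OriginalType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
import org.apache.parquet.schema.Type.Repetition;

public class ArrayGroupSketch {
  @SuppressWarnings("deprecation")
  public static void main(String[] args) {
    // optional group my_list (LIST) { repeated group bag { optional int32 array_element; } }
    GroupType list = new GroupType(Repetition.OPTIONAL, "my_list", OriginalType.LIST,
        new GroupType(Repetition.REPEATED, "bag",
            new PrimitiveType(Repetition.OPTIONAL, PrimitiveTypeName.INT32, "array_element")));
    System.out.println(list);
  }
}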

http://git-wip-us.apache.org/repos/asf/hive/blob/b431c278/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
index 53f3b72..3e38cc7 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
@@ -23,24 +23,28 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.io.IOConstants;
 import 
org.apache.hadoop.hive.ql.io.parquet.convert.DataWritableRecordConverter;
+import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import 

[37/50] [abbrv] hive git commit: HIVE-10729: Query failed when select complex columns from joined table (tez map join only) (Matt McCline, reviewed by Sergey Shelukhin)

2016-04-04 Thread jdere
HIVE-10729: Query failed when select complex columns from joined table (tez 
map join only) (Matt McCline, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ff10f033
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ff10f033
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ff10f033

Branch: refs/heads/llap
Commit: ff10f03371f5ff54d34a28938c5d6e69940113ea
Parents: 44ab455
Author: Matt McCline 
Authored: Tue Mar 29 01:52:48 2016 -0700
Committer: Matt McCline 
Committed: Tue Mar 29 01:54:51 2016 -0700

--
 .../test/resources/testconfiguration.properties |   1 +
 .../ql/exec/vector/VectorizationContext.java|  14 +-
 .../mapjoin/VectorMapJoinCommonOperator.java|   2 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |   7 +
 .../ql/optimizer/physical/TestVectorizer.java   |   5 +
 .../clientpositive/vector_complex_join.q|  29 +++
 .../tez/vector_complex_join.q.out   | 227 +++
 .../clientpositive/vector_complex_join.q.out| 225 ++
 8 files changed, 502 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ff10f033/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 0672e0e..ed26dea 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -258,6 +258,7 @@ minitez.query.files.shared=acid_globallimit.q,\
   vector_coalesce.q,\
   vector_coalesce_2.q,\
   vector_complex_all.q,\
+  vector_complex_join.q,\
   vector_count_distinct.q,\
   vector_data_types.q,\
   vector_date_1.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/ff10f033/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 0552f9d..1eb960d 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -342,7 +342,7 @@ public class VectorizationContext {
 
 private final Set usedOutputColumns = new HashSet();
 
-int allocateOutputColumn(String hiveTypeName) {
+int allocateOutputColumn(String hiveTypeName) throws HiveException {
 if (initialOutputCol < 0) {
   // This is a test
   return 0;
@@ -403,7 +403,7 @@ public class VectorizationContext {
 }
   }
 
-  public int allocateScratchColumn(String hiveTypeName) {
+  public int allocateScratchColumn(String hiveTypeName) throws HiveException {
 return ocm.allocateOutputColumn(hiveTypeName);
   }
 
@@ -2243,7 +2243,7 @@ public class VectorizationContext {
 }
   }
 
-  static String getNormalizedName(String hiveTypeName) {
+  static String getNormalizedName(String hiveTypeName) throws HiveException {
 VectorExpressionDescriptor.ArgumentType argType = 
VectorExpressionDescriptor.ArgumentType.fromHiveTypeName(hiveTypeName);
 switch (argType) {
 case INT_FAMILY:
@@ -2269,11 +2269,11 @@ public class VectorizationContext {
 case INTERVAL_DAY_TIME:
   return hiveTypeName;
 default:
-  return "None";
+  throw new HiveException("Unexpected hive type name " + hiveTypeName);
 }
   }
 
-  static String getUndecoratedName(String hiveTypeName) {
+  static String getUndecoratedName(String hiveTypeName) throws HiveException {
 VectorExpressionDescriptor.ArgumentType argType = 
VectorExpressionDescriptor.ArgumentType.fromHiveTypeName(hiveTypeName);
 switch (argType) {
 case INT_FAMILY:
@@ -2296,7 +2296,7 @@ public class VectorizationContext {
 case INTERVAL_DAY_TIME:
   return hiveTypeName;
 default:
-  return "None";
+  throw new HiveException("Unexpected hive type name " + hiveTypeName);
 }
   }
 
@@ -2511,7 +2511,7 @@ public class VectorizationContext {
 }
 sb.append("sorted projectionColumnMap ").append(sortedColumnMap).append(", 
");
 
-sb.append("scratchColumnTypeNames 
").append(getScratchColumnTypeNames().toString());
+sb.append("scratchColumnTypeNames 
").append(Arrays.toString(getScratchColumnTypeNames()));
 
 return sb.toString();
   }
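
A minimal hypothetical sketch of the behavioral change in getNormalizedName and getUndecoratedName above: returning a sentinel such as "None" lets an unexpected type name flow silently into scratch-column allocation, while throwing surfaces the problem at its source. Plain Exception stands in for HiveException here, and the type list is abbreviated.

public class FailFastTypeNameSketch {
  // Before: unknown type names were silently mapped to a sentinel value.
  static String normalizeLenient(String hiveTypeName) {
    switch (hiveTypeName) {
      case "int": case "bigint": return "Long";
      case "float": case "double": return "Double";
      default: return "None"; // quietly poisons later scratch-column allocation
    }
  }

  // After: unknown type names fail immediately with a descriptive error.
  static String normalizeStrict(String hiveTypeName) throws Exception {
    switch (hiveTypeName) {
      case "int": case "bigint": return "Long";
      case "float": case "double": return "Double";
      default: throw new Exception("Unexpected hive type name " + hiveTypeName);
    }
  }

  public static void main(String[] args) throws Exception {
    System.out.println(normalizeLenient("map<int,string>")); // "None"
    System.out.println(normalizeStrict("bigint"));           // "Long"
  }
}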

http://git-wip-us.apache.org/repos/asf/hive/blob/ff10f033/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java

[35/50] [abbrv] hive git commit: HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 (Matt McCline, reviewed by Jason Dere)

2016-04-04 Thread jdere
HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 
(Matt McCline, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/52016296
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/52016296
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/52016296

Branch: refs/heads/llap
Commit: 52016296cf7de89d868e1073a3376b330fee0955
Parents: 761b547
Author: Matt McCline 
Authored: Mon Mar 28 14:14:37 2016 -0700
Committer: Matt McCline 
Committed: Mon Mar 28 14:14:37 2016 -0700

--
 .../apache/hadoop/hive/ant/GenVectorCode.java   |  531 -
 .../hive/common/type/HiveIntervalDayTime.java   |  245 
 .../org/apache/hive/common/util/DateUtils.java  |   19 -
 data/files/timestamps.txt   |   50 +
 .../test/resources/testconfiguration.properties |2 +
 .../java/org/apache/orc/impl/WriterImpl.java|   23 +-
 ...eColumnArithmeticIntervalYearMonthColumn.txt |   56 +-
 ...eColumnArithmeticIntervalYearMonthScalar.txt |   55 +-
 .../DateColumnArithmeticTimestampColumn.txt |  141 ++-
 .../DateColumnArithmeticTimestampColumnBase.txt |  171 ---
 .../DateColumnArithmeticTimestampScalar.txt |  113 +-
 .../DateColumnArithmeticTimestampScalarBase.txt |  137 ---
 ...eScalarArithmeticIntervalYearMonthColumn.txt |   53 +-
 .../DateScalarArithmeticTimestampColumn.txt |  108 +-
 .../DateScalarArithmeticTimestampColumnBase.txt |  147 ---
 ...ayTimeColumnCompareIntervalDayTimeColumn.txt |   52 -
 ...ayTimeColumnCompareIntervalDayTimeScalar.txt |   55 -
 ...ayTimeScalarCompareIntervalDayTimeColumn.txt |   55 -
 ...erLongDoubleColumnCompareTimestampColumn.txt |2 +-
 ...erLongDoubleColumnCompareTimestampScalar.txt |4 +-
 ...erLongDoubleScalarCompareTimestampColumn.txt |4 +
 .../FilterTimestampColumnBetween.txt|   11 +-
 ...terTimestampColumnCompareTimestampColumn.txt |  417 ++-
 ...imestampColumnCompareTimestampColumnBase.txt |  429 ---
 ...terTimestampColumnCompareTimestampScalar.txt |  128 ++-
 ...imestampColumnCompareTimestampScalarBase.txt |  145 ---
 ...erTimestampScalarCompareLongDoubleColumn.txt |3 +-
 ...terTimestampScalarCompareTimestampColumn.txt |  132 ++-
 ...imestampScalarCompareTimestampColumnBase.txt |  147 ---
 ...ayTimeColumnCompareIntervalDayTimeColumn.txt |   54 -
 ...ayTimeColumnCompareIntervalDayTimeScalar.txt |   57 -
 ...ayTimeScalarCompareIntervalDayTimeColumn.txt |   57 -
 ...ervalYearMonthColumnArithmeticDateColumn.txt |   55 +-
 ...ervalYearMonthColumnArithmeticDateScalar.txt |   51 +-
 ...YearMonthColumnArithmeticTimestampColumn.txt |   63 +-
 ...YearMonthColumnArithmeticTimestampScalar.txt |   48 +-
 ...ervalYearMonthScalarArithmeticDateColumn.txt |   51 +-
 ...YearMonthScalarArithmeticTimestampColumn.txt |   55 +-
 .../LongDoubleColumnCompareTimestampColumn.txt  |1 -
 .../LongDoubleColumnCompareTimestampScalar.txt  |3 +-
 .../LongDoubleScalarCompareTimestampColumn.txt  |1 +
 .../TimestampColumnArithmeticDateColumn.txt |  138 ++-
 .../TimestampColumnArithmeticDateColumnBase.txt |  172 ---
 .../TimestampColumnArithmeticDateScalar.txt |   98 +-
 .../TimestampColumnArithmeticDateScalarBase.txt |  126 --
 ...pColumnArithmeticIntervalYearMonthColumn.txt |   59 +-
 ...pColumnArithmeticIntervalYearMonthScalar.txt |   41 +-
 ...TimestampColumnArithmeticTimestampColumn.txt |  128 ++-
 ...stampColumnArithmeticTimestampColumnBase.txt |  152 ---
 ...TimestampColumnArithmeticTimestampScalar.txt |   96 +-
 ...stampColumnArithmeticTimestampScalarBase.txt |  125 --
 .../TimestampColumnCompareLongDoubleScalar.txt  |1 +
 .../TimestampColumnCompareTimestampColumn.txt   |  122 +-
 ...imestampColumnCompareTimestampColumnBase.txt |  140 ---
 .../TimestampColumnCompareTimestampScalar.txt   |  114 +-
 ...imestampColumnCompareTimestampScalarBase.txt |  131 ---
 .../TimestampScalarArithmeticDateColumn.txt |  117 +-
 .../TimestampScalarArithmeticDateColumnBase.txt |  151 ---
 ...pScalarArithmeticIntervalYearMonthColumn.txt |   62 +-
 ...TimestampScalarArithmeticTimestampColumn.txt |  103 +-
 ...stampScalarArithmeticTimestampColumnBase.txt |  139 ---
 .../TimestampScalarCompareLongDoubleColumn.txt  |4 +-
 .../TimestampScalarCompareTimestampColumn.txt   |  115 +-
 ...imestampScalarCompareTimestampColumnBase.txt |  132 ---
 .../VectorUDAFMinMaxIntervalDayTime.txt |  454 
 .../UDAFTemplates/VectorUDAFMinMaxTimestamp.txt |   31 +-
 .../hive/ql/exec/vector/TimestampUtils.java |8 +
 .../hive/ql/exec/vector/VectorAssignRow.java|   23 +-
 .../exec/vector/VectorColumnAssignFactory.java  |   19 +-
 .../ql/exec/vector/VectorColumnSetInfo.java |   23 +-
 .../hive/ql/exec/vector/VectorCopyRow.java  |   32 +
 .../ql/exec/vector/VectorDeserializeRow.java|   13 +-
 

[47/50] [abbrv] hive git commit: HIVE-13367: Extending HPLSQL parser (Dmitry Tolpeko reviewed by Alan Gates)

2016-04-04 Thread jdere
HIVE-13367: Extending HPLSQL parser (Dmitry Tolpeko reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/39d66a43
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/39d66a43
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/39d66a43

Branch: refs/heads/llap
Commit: 39d66a439c02ea2b5c7501b362c0d8f9b8d22cc0
Parents: 55383d8
Author: Dmitry Tolpeko 
Authored: Wed Mar 30 00:19:11 2016 -0700
Committer: Dmitry Tolpeko 
Committed: Wed Mar 30 00:19:11 2016 -0700

--
 .../antlr4/org/apache/hive/hplsql/Hplsql.g4 | 266 +---
 .../main/java/org/apache/hive/hplsql/Conf.java  |   2 +-
 .../main/java/org/apache/hive/hplsql/Conn.java  |   3 +-
 .../java/org/apache/hive/hplsql/Converter.java  |  15 +-
 .../main/java/org/apache/hive/hplsql/Exec.java  | 106 -
 .../java/org/apache/hive/hplsql/Expression.java |  13 +
 .../main/java/org/apache/hive/hplsql/File.java  |  11 +
 .../main/java/org/apache/hive/hplsql/Ftp.java   | 415 +++
 .../main/java/org/apache/hive/hplsql/Meta.java  |  35 +-
 .../java/org/apache/hive/hplsql/Package.java|   3 +
 .../main/java/org/apache/hive/hplsql/Row.java   |   4 +-
 .../java/org/apache/hive/hplsql/Select.java |   6 +-
 .../main/java/org/apache/hive/hplsql/Stmt.java  | 167 +++-
 .../main/java/org/apache/hive/hplsql/Utils.java |  40 ++
 .../main/java/org/apache/hive/hplsql/Var.java   |  18 +-
 .../apache/hive/hplsql/functions/Function.java  |  40 +-
 .../hive/hplsql/functions/FunctionDatetime.java |  40 ++
 .../hive/hplsql/functions/FunctionString.java   |  26 +-
 .../org/apache/hive/hplsql/TestHplsqlLocal.java |  28 +-
 .../apache/hive/hplsql/TestHplsqlOffline.java   |  25 ++
 .../test/queries/db/create_drop_database.sql|   5 +
 .../queries/db/create_procedure_no_params.sql   |  25 ++
 hplsql/src/test/queries/db/describe.sql |   3 +
 hplsql/src/test/queries/db/execute.sql  |   7 +
 hplsql/src/test/queries/db/expression.sql   |   1 +
 hplsql/src/test/queries/db/for.sql  |   1 +
 hplsql/src/test/queries/db/insert.sql   |   3 +
 hplsql/src/test/queries/db/insert_directory.sql |  12 +
 hplsql/src/test/queries/db/schema.sql   |  10 +-
 hplsql/src/test/queries/db/truncate_table.sql   |   2 +
 .../src/test/queries/local/create_function3.sql |  58 +++
 .../src/test/queries/local/create_function4.sql |  19 +
 .../test/queries/local/create_procedure3.sql|  29 ++
 hplsql/src/test/queries/local/declare3.sql  |   7 +
 hplsql/src/test/queries/local/if.sql|   6 +-
 hplsql/src/test/queries/local/interval.sql  |   4 +-
 hplsql/src/test/queries/local/replace.sql   |   1 +
 .../queries/offline/create_table_mssql2.sql |  33 ++
 .../test/queries/offline/create_table_mysql.sql |   5 +
 .../test/queries/offline/create_table_ora2.sql  |   6 +
 .../test/queries/offline/create_table_pg.sql|   5 +
 hplsql/src/test/queries/offline/update.sql  |  33 ++
 .../results/db/create_drop_database.out.txt |   8 +
 .../results/db/create_procedure_mssql.out.txt   |   2 +-
 .../db/create_procedure_no_params.out.txt   |  10 +
 .../db/create_procedure_return_cursor.out.txt   |   4 +-
 .../db/create_procedure_return_cursor2.out.txt  |   4 +-
 hplsql/src/test/results/db/describe.out.txt |  12 +
 hplsql/src/test/results/db/execute.out.txt  |  14 +
 hplsql/src/test/results/db/expression.out.txt   |   5 +
 hplsql/src/test/results/db/for.out.txt  |  44 ++
 hplsql/src/test/results/db/insert.out.txt   |   4 +
 .../test/results/db/insert_directory.out.txt|   9 +
 .../test/results/db/rowtype_attribute.out.txt   |   2 +-
 .../src/test/results/db/truncate_table.out.txt  |   4 +
 .../test/results/local/create_function3.out.txt |  22 +
 .../test/results/local/create_function4.out.txt |   9 +
 .../test/results/local/create_package.out.txt   |   2 +-
 .../results/local/create_procedure3.out.txt |  31 ++
 .../local/create_procedure_no_params.out.txt|  12 +-
 hplsql/src/test/results/local/declare3.out.txt  |   9 +
 hplsql/src/test/results/local/if.out.txt|   4 +
 hplsql/src/test/results/local/interval.out.txt  |   1 +
 hplsql/src/test/results/local/replace.out.txt   |   1 +
 .../results/offline/create_table_mssql2.out.txt |  10 +
 .../results/offline/create_table_mysql.out.txt  |   4 +
 .../results/offline/create_table_ora2.out.txt   |   5 +
 .../results/offline/create_table_pg.out.txt |   5 +
 hplsql/src/test/results/offline/update.out.txt  |  34 ++
 69 files changed, 1672 insertions(+), 137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/39d66a43/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4
--
diff 

[26/50] [abbrv] hive git commit: HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 (Matt McCline, reviewed by Jason Dere)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/ql/src/test/results/clientpositive/tez/vectorized_casts.q.out
--
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_casts.q.out 
b/ql/src/test/results/clientpositive/tez/vectorized_casts.q.out
index b7b17de..cf6f4c7 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_casts.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_casts.q.out
@@ -353,18 +353,18 @@ true  NULLtruetruetrueNULLtrue
false   truetrue11  NULL-64615982   1803053750
 true   NULLtruetruetrueNULLtruefalse   truetrue
8   NULL890988972   -1862301000 8   NULL1   15  
NULLNULL8   8   8   8.0 NULL8.90988972E8
-1.862301E9 8.0 NULL1.0 15.892  NULLNULL8.9098899E8 
NULL1969-12-31 16:00:00.008 NULL1970-01-10 23:29:48.972 1969-12-10 
02:41:39 1969-12-31 16:00:08 NULL1969-12-31 16:00:00.001 1969-12-31 
16:00:00 1969-12-31 16:00:15.892 NULLNULL8   NULL890988972  
 -1862301000 8.0 NULLTRUE0   1969-12-31 16:00:15.892 
XylAH4  XylAH4  XylAH4  8.0 1.781977944E9   0.9893582466233818  
8.90988973E8
 true   NULLtruetruetrueNULLtruefalse   truetrue
8   NULL930867246   1205399250  8   NULL1   15  
NULLNULL8   8   8   8.0 NULL9.30867246E8
1.20539925E98.0 NULL1.0 15.892  NULLNULL9.3086726E8 
NULL1969-12-31 16:00:00.008 NULL1970-01-11 10:34:27.246 1970-01-14 
14:49:59.25  1969-12-31 16:00:08 NULL1969-12-31 16:00:00.001 1969-12-31 
16:00:00 1969-12-31 16:00:15.892 NULLNULL8   NULL930867246  
 1205399250  8.0 NULLTRUE0   1969-12-31 16:00:15.892 
c1V8o1A c1V8o1A c1V8o1A 8.0 1.861734492E9   0.9893582466233818  
9.30867247E8
 true   trueNULLtruetruetrueNULLfalse   trueNULL
-14 -7196   NULL-1552199500 -14 -7196   NULL11  NULL
NULL-14 -14 -14 -14.0   -7196.0 NULL-1.5521995E9-14.0   
-7196.0 NULL11.065  NULLNULLNULL-7196.0 1969-12-31 15:59:59.986 
1969-12-31 15:59:52.804 NULL1969-12-13 16:50:00.5   1969-12-31 15:59:46 
1969-12-31 14:00:04 NULL1969-12-31 16:00:00 1969-12-31 16:00:11.065 
NULLNULL-14 -7196   NULL-1552199500 -14.0   -7196.0 NULL
0   1969-12-31 16:00:11.065 NULLNULLNULL-14.0   NULL
-0.9906073556948704 NULL
-true   trueNULLtruetruetrueNULLfalse   trueNULL
-21 -7196   NULL1542429000  -21 -7196   NULL-4  NULL
NULL-21 -21 -21 -21.0   -7196.0 NULL1.542429E9  -21.0   
-7196.0 NULL-4.1NULLNULLNULL-7196.0 1969-12-31 15:59:59.979 
1969-12-31 15:59:52.804 NULL1970-01-18 12:27:09 1969-12-31 15:59:39 
1969-12-31 14:00:04 NULL1969-12-31 16:00:00 1969-12-31 15:59:55.9   
NULLNULL-21 -7196   NULL1542429000  -21.0   -7196.0 NULL
0   1969-12-31 15:59:55.9   NULLNULLNULL-21.0   NULL
-0.8366556385360561 NULL
-true   trueNULLtruetruetrueNULLfalse   trueNULL
-24 -7196   NULL829111000   -24 -7196   NULL-6  NULL
NULL-24 -24 -24 -24.0   -7196.0 NULL8.29111E8   -24.0   
-7196.0 NULL-6.855  NULLNULLNULL-7196.0 1969-12-31 15:59:59.976 
1969-12-31 15:59:52.804 NULL1970-01-10 06:18:31 1969-12-31 15:59:36 
1969-12-31 14:00:04 NULL1969-12-31 16:00:00 1969-12-31 15:59:53.145 
NULLNULL-24 -7196   NULL829111000   -24.0   -7196.0 NULL
0   1969-12-31 15:59:53.145 NULLNULLNULL-24.0   NULL
0.9055783620066238  NULL
+true   trueNULLtruetruetrueNULLfalse   trueNULL
-21 -7196   NULL1542429000  -21 -7196   NULL-5  NULL
NULL-21 -21 -21 -21.0   -7196.0 NULL1.542429E9  -21.0   
-7196.0 NULL-4.1NULLNULLNULL-7196.0 1969-12-31 15:59:59.979 
1969-12-31 15:59:52.804 NULL1970-01-18 12:27:09 1969-12-31 15:59:39 
1969-12-31 14:00:04 NULL1969-12-31 16:00:00 1969-12-31 15:59:55.9   
NULLNULL-21 -7196   NULL1542429000  -21.0   -7196.0 NULL
0   1969-12-31 15:59:55.9   NULLNULLNULL-21.0   NULL
-0.8366556385360561 NULL
+true   trueNULLtruetruetrueNULLfalse   trueNULL
-24 -7196   NULL829111000   -24 -7196   NULL-7  NULL
NULL-24 -24 -24 -24.0   -7196.0 NULL8.29111E8   -24.0   

[29/50] [abbrv] hive git commit: HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 (Matt McCline, reviewed by Jason Dere)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampColumnInList.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampColumnInList.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampColumnInList.java
index 2d7d0c2..bc09a3a 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampColumnInList.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/TimestampColumnInList.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions;
 import java.sql.Timestamp;
 import java.util.HashSet;
 
-import org.apache.hadoop.hive.common.type.PisaTimestamp;
 import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
 import 
org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor.Descriptor;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
@@ -36,11 +35,8 @@ public class TimestampColumnInList extends VectorExpression 
implements ITimestam
   private Timestamp[] inListValues;
   private int outputColumn;
 
-  private transient PisaTimestamp scratchTimestamp;
-
-
   // The set object containing the IN list.
-  private transient HashSet<PisaTimestamp> inSet;
+  private transient HashSet<Timestamp> inSet;
 
   public TimestampColumnInList() {
 super();
@@ -64,11 +60,10 @@ public class TimestampColumnInList extends VectorExpression 
implements ITimestam
 }
 
 if (inSet == null) {
-  inSet = new HashSet<PisaTimestamp>(inListValues.length);
+  inSet = new HashSet<Timestamp>(inListValues.length);
   for (Timestamp val : inListValues) {
-inSet.add(new PisaTimestamp(val));
+inSet.add(val);
   }
-  scratchTimestamp = new PisaTimestamp();
 }
 
 TimestampColumnVector inputColVector = (TimestampColumnVector) 
batch.cols[inputCol];
@@ -91,19 +86,16 @@ public class TimestampColumnInList extends VectorExpression 
implements ITimestam
 
 // All must be selected otherwise size would be zero
 // Repeating property will not change.
-inputColVector.pisaTimestampUpdate(scratchTimestamp, 0);
-outputVector[0] = inSet.contains(scratchTimestamp) ? 1 : 0;
+outputVector[0] = inSet.contains(inputColVector.asScratchTimestamp(0)) 
? 1 : 0;
 outputColVector.isRepeating = true;
   } else if (batch.selectedInUse) {
 for(int j = 0; j != n; j++) {
   int i = sel[j];
-  inputColVector.pisaTimestampUpdate(scratchTimestamp, i);
-  outputVector[i] = inSet.contains(scratchTimestamp) ? 1 : 0;
+  outputVector[i] = 
inSet.contains(inputColVector.asScratchTimestamp(i)) ? 1 : 0;
 }
   } else {
 for(int i = 0; i != n; i++) {
-  inputColVector.pisaTimestampUpdate(scratchTimestamp, i);
-  outputVector[i] = inSet.contains(scratchTimestamp) ? 1 : 0;
+  outputVector[i] = 
inSet.contains(inputColVector.asScratchTimestamp(i)) ? 1 : 0;
 }
   }
 } else {
@@ -112,8 +104,7 @@ public class TimestampColumnInList extends VectorExpression 
implements ITimestam
 //All must be selected otherwise size would be zero
 //Repeating property will not change.
 if (!nullPos[0]) {
-  inputColVector.pisaTimestampUpdate(scratchTimestamp, 0);
-  outputVector[0] = inSet.contains(scratchTimestamp) ? 1 : 0;
+  outputVector[0] = 
inSet.contains(inputColVector.asScratchTimestamp(0)) ? 1 : 0;
   outNulls[0] = false;
 } else {
   outNulls[0] = true;
@@ -124,16 +115,14 @@ public class TimestampColumnInList extends 
VectorExpression implements ITimestam
   int i = sel[j];
   outNulls[i] = nullPos[i];
   if (!nullPos[i]) {
-inputColVector.pisaTimestampUpdate(scratchTimestamp, i);
-outputVector[i] = inSet.contains(scratchTimestamp) ? 1 : 0;
+outputVector[i] = 
inSet.contains(inputColVector.asScratchTimestamp(i)) ? 1 : 0;
   }
 }
   } else {
 System.arraycopy(nullPos, 0, outNulls, 0, n);
 for(int i = 0; i != n; i++) {
   if (!nullPos[i]) {
-inputColVector.pisaTimestampUpdate(scratchTimestamp, i);
-outputVector[i] = inSet.contains(scratchTimestamp) ? 1 : 0;
+outputVector[i] = 
inSet.contains(inputColVector.asScratchTimestamp(i)) ? 1 : 0;
   }
 }
   }
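
A compact self-contained sketch of the simplification above, using plain JDK types and hypothetical values: java.sql.Timestamp already has value-based equals and hashCode, so the IN list can be kept in a HashSet<Timestamp> and probed directly, with no per-row scratch object to refresh.

import java.sql.Timestamp;
import java.util.HashSet;

public class TimestampInListSketch {
  public static void main(String[] args) {
    Timestamp[] inListValues = {
        Timestamp.valueOf("2016-04-04 19:15:07"),
        Timestamp.valueOf("1969-12-31 16:00:00")
    };

    // Build the lookup set once; Timestamp hashes and compares by value.
    HashSet<Timestamp> inSet = new HashSet<>(inListValues.length);
    for (Timestamp val : inListValues) {
      inSet.add(val);
    }

    // Per-row probe: nothing to update before calling contains().
    Timestamp probe = Timestamp.valueOf("2016-04-04 19:15:07");
    System.out.println(inSet.contains(probe) ? 1 : 0); // 1
  }
}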

http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriter.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriter.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriter.java
index 326bfb9..85dacd7 100644
--- 

[25/50] [abbrv] hive git commit: HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 (Matt McCline, reviewed by Jason Dere)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out 
b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
new file mode 100644
index 000..cd8111d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
@@ -0,0 +1,1027 @@
+PREHOOK: query: create table unique_timestamps (tsval timestamp) STORED AS 
TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@unique_timestamps
+POSTHOOK: query: create table unique_timestamps (tsval timestamp) STORED AS 
TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@unique_timestamps
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/timestamps.txt' 
OVERWRITE INTO TABLE unique_timestamps
+PREHOOK: type: LOAD
+ A masked pattern was here 
+PREHOOK: Output: default@unique_timestamps
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/timestamps.txt' 
OVERWRITE INTO TABLE unique_timestamps
+POSTHOOK: type: LOAD
+ A masked pattern was here 
+POSTHOOK: Output: default@unique_timestamps
+PREHOOK: query: create table interval_arithmetic_1 (dateval date, tsval 
timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@interval_arithmetic_1
+POSTHOOK: query: create table interval_arithmetic_1 (dateval date, tsval 
timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@interval_arithmetic_1
+PREHOOK: query: insert overwrite table interval_arithmetic_1
+  select cast(tsval as date), tsval from unique_timestamps
+PREHOOK: type: QUERY
+PREHOOK: Input: default@unique_timestamps
+PREHOOK: Output: default@interval_arithmetic_1
+POSTHOOK: query: insert overwrite table interval_arithmetic_1
+  select cast(tsval as date), tsval from unique_timestamps
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@unique_timestamps
+POSTHOOK: Output: default@interval_arithmetic_1
+POSTHOOK: Lineage: interval_arithmetic_1.dateval EXPRESSION 
[(unique_timestamps)unique_timestamps.FieldSchema(name:tsval, type:timestamp, 
comment:null), ]
+POSTHOOK: Lineage: interval_arithmetic_1.tsval SIMPLE 
[(unique_timestamps)unique_timestamps.FieldSchema(name:tsval, type:timestamp, 
comment:null), ]
+_c0tsval
+PREHOOK: query: -- interval year-month arithmetic
+explain
+select
+  dateval,
+  dateval - interval '2-2' year to month,
+  dateval - interval '-2-2' year to month,
+  dateval + interval '2-2' year to month,
+  dateval + interval '-2-2' year to month,
+  - interval '2-2' year to month + dateval,
+  interval '2-2' year to month + dateval
+from interval_arithmetic_1
+order by dateval
+PREHOOK: type: QUERY
+POSTHOOK: query: -- interval year-month arithmetic
+explain
+select
+  dateval,
+  dateval - interval '2-2' year to month,
+  dateval - interval '-2-2' year to month,
+  dateval + interval '2-2' year to month,
+  dateval + interval '-2-2' year to month,
+  - interval '2-2' year to month + dateval,
+  interval '2-2' year to month + dateval
+from interval_arithmetic_1
+order by dateval
+POSTHOOK: type: QUERY
+Explain
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: interval_arithmetic_1
+Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE 
Column stats: NONE
+Select Operator
+  expressions: dateval (type: date), (dateval - 2-2) (type: date), 
(dateval - -2-2) (type: date), (dateval + 2-2) (type: date), (dateval + -2-2) 
(type: date), (-2-2 + dateval) (type: date), (2-2 + dateval) (type: date)
+  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6
+  Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE 
Column stats: NONE
+  Reduce Output Operator
+key expressions: _col0 (type: date)
+sort order: +
+Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE 
Column stats: NONE
+value expressions: _col1 (type: date), _col2 (type: date), 
_col3 (type: date), _col4 (type: date), _col5 (type: date), _col6 (type: date)
+  Execution mode: vectorized
+  Reduce Operator Tree:
+Select Operator
+  expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: 
date), VALUE._col1 (type: date), VALUE._col2 (type: date), VALUE._col3 (type: 
date), VALUE._col4 (type: date), VALUE._col5 (type: date)
+  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+  Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE 
Column stats: NONE
+

[06/50] [abbrv] hive git commit: HIVE-13008 - WebHcat DDL commands in secure mode NPE when default FileSystem doesn't support delegation tokens (Eugene Koifman, reviewed by Chris Nauroth, Thejas Nair)

2016-04-04 Thread jdere
HIVE-13008 - WebHcat DDL commands in secure mode NPE when default FileSystem 
doesn't support delegation tokens (Eugene Koifman, reviewed by Chris Nauroth, 
Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ab095f0b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ab095f0b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ab095f0b

Branch: refs/heads/llap
Commit: ab095f0bc24447ab73843a1ae23a32f7b6c4bd1a
Parents: f9d1b6a
Author: Eugene Koifman 
Authored: Thu Mar 24 18:03:32 2016 -0700
Committer: Eugene Koifman 
Committed: Thu Mar 24 18:03:32 2016 -0700

--
 .../hcatalog/templeton/SecureProxySupport.java  | 46 ++--
 1 file changed, 33 insertions(+), 13 deletions(-)
--
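
Before the diff, a hedged sketch of the token-collection pattern the patch adopts, written against public Hadoop APIs only (FileSystem.addDelegationTokens and Credentials). The "mapreduce.job.hdfs-servers" key and the idea of tolerating filesystems that issue no tokens come from the patch; everything else, including the helper name, is illustrative, and it is shown as a helper rather than a runnable main because exercising it needs a Kerberos-secured cluster.

import java.net.URI;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class FsTokenSketch {
  // Collect delegation tokens from the default FS plus any additional filesystems.
  // Filesystems that do not support delegation tokens (e.g. WASB) simply add none,
  // and the shared Credentials object keeps duplicates from being added twice.
  static List<Token<?>> collectAll(Configuration conf, String renewer) throws Exception {
    Credentials creds = new Credentials();
    List<Token<?>> all = new ArrayList<>();

    Token<?>[] defaults = FileSystem.get(conf).addDelegationTokens(renewer, creds);
    if (defaults != null) {
      for (Token<?> t : defaults) { all.add(t); }
    }
    for (String uri : conf.getStringCollection("mapreduce.job.hdfs-servers")) {
      Token<?>[] extra = FileSystem.get(new URI(uri), conf).addDelegationTokens(renewer, creds);
      if (extra != null) {
        for (Token<?> t : extra) { all.add(t); }
      }
    }
    return all;
  }
}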


http://git-wip-us.apache.org/repos/asf/hive/blob/ab095f0b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
--
diff --git 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
index 2ac62c0..13f3c9b 100644
--- 
a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
+++ 
b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/SecureProxySupport.java
@@ -20,10 +20,14 @@ package org.apache.hive.hcatalog.templeton;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.commons.lang3.ArrayUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -79,7 +83,7 @@ public class SecureProxySupport {
   this.user = user;
   File t = File.createTempFile("templeton", null);
   tokenPath = new Path(t.toURI());
-  Token fsToken = getFSDelegationToken(user, conf);
+  Token[] fsToken = getFSDelegationToken(user, conf);
   String hcatTokenStr;
   try {
 hcatTokenStr = buildHcatDelegationToken(user);
@@ -130,11 +134,11 @@ public class SecureProxySupport {
 }
   }
 
-  class TokenWrapper {
-Token token;
+  private static class TokenWrapper {
+Token[] tokens = new Token[0];
   }
 
-  private Token getFSDelegationToken(String user,
+  private Token[] getFSDelegationToken(String user,
   final Configuration conf)
 throws IOException, InterruptedException {
 LOG.info("user: " + user + " loginUser: " + 
UserGroupInformation.getLoginUser().getUserName());
@@ -142,18 +146,32 @@ public class SecureProxySupport {
 
 final TokenWrapper twrapper = new TokenWrapper();
 ugi.doAs(new PrivilegedExceptionAction() {
-  public Object run() throws IOException {
-FileSystem fs = FileSystem.get(conf);
-//todo: according to JavaDoc this seems like private API: 
addDelegationToken should be used
-twrapper.token = fs.getDelegationToken(ugi.getShortUserName());
+  public Object run() throws IOException, URISyntaxException {
+Credentials creds = new Credentials();
+//get Tokens for default FS.  Not all FSs support delegation tokens, 
e.g. WASB
+collectTokens(FileSystem.get(conf), twrapper, creds, 
ugi.getShortUserName());
+//get tokens for all other known FSs since Hive tables may result in 
different ones
+//passing "creds" prevents duplicate tokens from being added
+Collection URIs = 
conf.getStringCollection("mapreduce.job.hdfs-servers");
+for(String uri : URIs) {
+  LOG.debug("Getting tokens for " + uri);
+  collectTokens(FileSystem.get(new URI(uri), conf), twrapper, creds, 
ugi.getShortUserName());
+}
 return null;
   }
 });
-return twrapper.token;
-
+return twrapper.tokens;
   }
-
-  private void writeProxyDelegationTokens(final Token fsToken,
+  private static void collectTokens(FileSystem fs, TokenWrapper twrapper, 
Credentials creds, String userName) throws IOException {
+Token[] tokens = fs.addDelegationTokens(userName, creds);
+if(tokens != null && tokens.length > 0) {
+  twrapper.tokens = ArrayUtils.addAll(twrapper.tokens, tokens);
+}
+  }
+  /**
+   * @param fsTokens not null
+   */
+  private void writeProxyDelegationTokens(final Token fsTokens[],
   final Token msToken,
   final Configuration conf,
   String user,
@@ -168,7 +186,9 @@ public class SecureProxySupport {
 ugi.doAs(new PrivilegedExceptionAction() {
   public 

[09/50] [abbrv] hive git commit: HIVE-13262: LLAP: Remove log levels from DebugUtils (Prasanth Jayachandran reviewed by Sergey Shelukhin)

2016-04-04 Thread jdere
HIVE-13262: LLAP: Remove log levels from DebugUtils (Prasanth Jayachandran 
reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3b6b56d7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3b6b56d7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3b6b56d7

Branch: refs/heads/llap
Commit: 3b6b56d7000ee1d80c0f191611968d4249f311d7
Parents: dfba1fb
Author: Prasanth Jayachandran 
Authored: Thu Mar 24 20:49:30 2016 -0500
Committer: Prasanth Jayachandran 
Committed: Thu Mar 24 20:49:30 2016 -0500

--
 .../llap/IncrementalObjectSizeEstimator.java|  54 ++---
 .../hadoop/hive/llap/cache/LlapDataBuffer.java  |  12 +-
 .../hive/llap/cache/LowLevelCacheImpl.java  |  35 ++-
 .../llap/cache/LowLevelCacheMemoryManager.java  |   6 +-
 .../llap/cache/LowLevelFifoCachePolicy.java |   4 +-
 .../llap/cache/LowLevelLrfuCachePolicy.java |  14 +-
 .../hive/llap/cache/SimpleBufferManager.java|   8 +-
 .../hive/llap/io/api/impl/LlapInputFormat.java  |  32 +--
 .../hive/llap/io/api/impl/LlapIoImpl.java   |  21 +-
 .../llap/io/decode/OrcColumnVectorProducer.java |   4 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |  95 +++-
 .../hadoop/hive/llap/old/BufferInProgress.java  |  82 ---
 .../apache/hadoop/hive/llap/old/BufferPool.java | 225 --
 .../hadoop/hive/llap/old/CachePolicy.java   |  34 ---
 .../apache/hadoop/hive/llap/old/ChunkPool.java  | 237 ---
 .../resources/llap-daemon-log4j2.properties |  14 +-
 .../org/apache/hadoop/hive/llap/DebugUtils.java |  43 
 .../org/apache/hadoop/hive/llap/LogLevels.java  |  53 -
 .../ql/exec/vector/VectorGroupByOperator.java   |   2 +-
 .../hive/ql/io/orc/encoded/EncodedReader.java   |   2 +-
 .../ql/io/orc/encoded/EncodedReaderImpl.java| 131 +-
 21 files changed, 192 insertions(+), 916 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/3b6b56d7/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
index 7d68294..3efbcc2 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
@@ -186,9 +186,7 @@ public class IncrementalObjectSizeEstimator {
   fieldCol = (Collection)fieldObj;
   if (fieldCol.size() == 0) {
 fieldCol = null;
-if (DebugUtils.isTraceEnabled()) {
-  LlapIoImpl.LOG.info("Empty collection " + field);
-}
+LlapIoImpl.LOG.trace("Empty collection {}", field);
   }
 }
 if (fieldCol != null) {
@@ -219,9 +217,7 @@ public class IncrementalObjectSizeEstimator {
   fieldCol = (Map)fieldObj;
   if (fieldCol.size() == 0) {
 fieldCol = null;
-if (DebugUtils.isTraceEnabled()) {
-  LlapIoImpl.LOG.info("Empty map " + field);
-}
+LlapIoImpl.LOG.trace("Empty map {}", field);
   }
 }
 if (fieldCol != null) {
@@ -257,15 +253,11 @@ public class IncrementalObjectSizeEstimator {
 return new Class[] { (Class)types[0], (Class)types[1] };
   } else {
 // TODO: we could try to get the declaring object and infer 
argument... stupid Java.
-if (DebugUtils.isTraceEnabled()) {
-  LlapIoImpl.LOG.info("Cannot determine map type: " + field);
-}
+LlapIoImpl.LOG.trace("Cannot determine map type: {}", field);
   }
 } else {
   // TODO: we could try to get superclass or generic interfaces.
-  if (DebugUtils.isTraceEnabled()) {
-LlapIoImpl.LOG.info("Non-parametrized map type: " + field);
-  }
+  LlapIoImpl.LOG.trace("Non-parametrized map type: {}", field);
 }
 return null;
   }
@@ -279,15 +271,11 @@ public class IncrementalObjectSizeEstimator {
 return (Class)type;
   } else {
 // TODO: we could try to get the declaring object and infer 
argument... stupid Java.
-if (DebugUtils.isTraceEnabled()) {
-  LlapIoImpl.LOG.info("Cannot determine collection type: " + field);
-}
+LlapIoImpl.LOG.trace("Cannot determine collection type: {}", field);
   }
 } else {
   // TODO: we could try to get superclass or generic interfaces.
-  if (DebugUtils.isTraceEnabled()) {
-LlapIoImpl.LOG.info("Non-parametrized collection type: " + field);
-  }
+  LlapIoImpl.LOG.trace("Non-parametrized collection type: {}", field);
 }
 return null;
   }
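
A tiny self-contained sketch of the logging idiom the patch above moves to: SLF4J parameterized messages at the intended level, instead of a custom level guard plus eager string concatenation. The logger and field name here are arbitrary.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TraceLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(TraceLoggingSketch.class);

  public static void main(String[] args) {
    String field = "someField";

    // Old style: an explicit guard (DebugUtils.isTraceEnabled() in the original,
    // isTraceEnabled() standing in here) around an eagerly concatenated INFO message.
    if (LOG.isTraceEnabled()) {
      LOG.info("Empty collection " + field);
    }

    // New style: the {} placeholder defers formatting until TRACE is enabled,
    // and the statement logs at the level it is guarded by.
    LOG.trace("Empty collection {}", field);
  }
}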

[22/50] [abbrv] hive git commit: HIVE-13358: Stats state is not captured correctly: turn off stats optimizer for sampled table (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2016-04-04 Thread jdere
HIVE-13358: Stats state is not captured correctly: turn off stats optimizer for 
sampled table (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/77474581
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/77474581
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/77474581

Branch: refs/heads/llap
Commit: 77474581df4016e3899a986e079513087a945674
Parents: 41a30b5
Author: Pengcheng Xiong 
Authored: Sun Mar 27 22:30:29 2016 -0700
Committer: Pengcheng Xiong 
Committed: Sun Mar 27 22:30:42 2016 -0700

--
 .../hive/ql/optimizer/StatsOptimizer.java   |  14 +-
 .../sample_islocalmode_hook_use_metadata.q  |  48 
 .../sample_islocalmode_hook_use_metadata.q.out  | 230 +++
 3 files changed, 289 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/77474581/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 4091c0d..bc17fec 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -100,9 +100,12 @@ public class StatsOptimizer extends Transform {
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
 
-if (pctx.getFetchTask() != null || !pctx.getQueryProperties().isQuery() ||
-pctx.getQueryProperties().isAnalyzeRewrite() || 
pctx.getQueryProperties().isCTAS() ||
-pctx.getLoadFileWork().size() > 1 || 
!pctx.getLoadTableWork().isEmpty()) {
+if (pctx.getFetchTask() != null || !pctx.getQueryProperties().isQuery()
+|| pctx.getQueryProperties().isAnalyzeRewrite() || 
pctx.getQueryProperties().isCTAS()
+|| pctx.getLoadFileWork().size() > 1 || 
!pctx.getLoadTableWork().isEmpty()
+// If getNameToSplitSample is not empty, at least one of the source
+// tables is being sampled and we can not optimize.
+|| !pctx.getNameToSplitSample().isEmpty()) {
   return pctx;
 }
 
@@ -251,6 +254,11 @@ public class StatsOptimizer extends Transform {
   // looks like a subq plan.
   return null;
 }
+if (tsOp.getConf().getRowLimit() != -1) {
+  // table is sampled. In some situation, we really can leverage row
+  // limit. In order to be safe, we do not use it now.
+  return null;
+}
 SelectOperator pselOp = (SelectOperator)stack.get(1);
 for(ExprNodeDesc desc : pselOp.getConf().getColList()) {
   if (!((desc instanceof ExprNodeColumnDesc) || (desc instanceof 
ExprNodeConstantDesc))) {

http://git-wip-us.apache.org/repos/asf/hive/blob/77474581/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
--
diff --git 
a/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q 
b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
new file mode 100644
index 000..ac915b5
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/sample_islocalmode_hook_use_metadata.q
@@ -0,0 +1,48 @@
+set hive.mapred.mode=nonstrict;
+set hive.exec.submitviachild=true;
+set hive.exec.submit.local.task.via.child=true;
+set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
+set mapred.max.split.size=300;
+set mapred.min.split.size=300;
+set mapred.min.split.size.per.node=300;
+set mapred.min.split.size.per.rack=300;
+set hive.exec.mode.local.auto=true;
+set hive.merge.smallfiles.avgsize=1;
+set hive.compute.query.using.stats=true;
+
+-- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S)
+
+-- create file inputs
+create table sih_i_part (key int, value string) partitioned by (p string);
+insert overwrite table sih_i_part partition (p='1') select key, value from src;
+insert overwrite table sih_i_part partition (p='2') select key+1, value 
from src;
+insert overwrite table sih_i_part partition (p='3') select key+2, value 
from src;
+create table sih_src as select key, value from sih_i_part order by key, value;
+create table sih_src2 as select key, value from sih_src order by key, value;
+
+set hive.exec.post.hooks = 
org.apache.hadoop.hive.ql.hooks.VerifyIsLocalModeHook;
+set mapreduce.framework.name=yarn;
+set mapreduce.jobtracker.address=localhost:58;
+set hive.sample.seednumber=7;
+
+-- Relaxing hive.exec.mode.local.auto.input.files.max=1.
+-- Hadoop20 will not generate more splits than there are files (one).
+-- Hadoop23 generate splits correctly 

[18/50] [abbrv] hive git commit: HIVE-12960: Migrate Column Stats Extrapolation and UniformDistribution to HBaseStore (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/96862093/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java
--
diff --git 
a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java
 
b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java
new file mode 100644
index 000..f4e55ed
--- /dev/null
+++ 
b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java
@@ -0,0 +1,717 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.StatObjectConverter;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+public class TestHBaseAggregateStatsExtrapolation {
+  private static final Logger LOG = LoggerFactory
+  .getLogger(TestHBaseAggregateStatsExtrapolation.class.getName());
+
+  @Mock
+  HTableInterface htable;
+  private HBaseStore store;
+  SortedMap<String, Cell> rows = new TreeMap<>();
+
+  // NDV will be 3 for the bitVectors
+  String bitVectors = "{0, 4, 5, 7}{0, 1}{0, 1, 2}{0, 1, 4}{0}{0, 2}{0, 3}{0, 2, 3, 4}{0, 1, 4}{0, 1}{0}{0, 1, 3, 8}{0, 2}{0, 2}{0, 9}{0, 1, 4}";
+
+  @Before
+  public void before() throws IOException {
+MockitoAnnotations.initMocks(this);
+HiveConf conf = new HiveConf();
+conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true);
+store = MockUtils.init(conf, htable, rows);
+store.backdoor().getStatsCache().resetCounters();
+  }
+
+  private static interface Checker {
+void checkStats(AggrStats aggrStats) throws Exception;
+  }
+
+  @Test
+  public void allPartitionsHaveBitVectorStatusLong() throws Exception {
+String dbName = "default";
+String tableName = "snp";
+long now = System.currentTimeMillis();
+List<FieldSchema> cols = new ArrayList<>();
+cols.add(new FieldSchema("col1", "long", "nocomment"));
+SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+serde, null, null, Collections.<String, String> emptyMap());
+List<FieldSchema> partCols = new ArrayList<>();
+partCols.add(new FieldSchema("ds", "string", ""));
+Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols,
+Collections.<String, String> emptyMap(), null, null, null);
+store.createTable(table);
+
+List<List<String>> partVals = new ArrayList<>();
+for (int i = 0; i < 10; i++) {
+  List<String> partVal = Arrays.asList("" + i);
+  partVals.add(partVal);
+  StorageDescriptor psd = new StorageDescriptor(sd);
+  

[33/50] [abbrv] hive git commit: HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 (Matt McCline, reviewed by Jason Dere)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
--
diff --git 
a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
 
b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
index f744d9b..bab8508 100644
--- 
a/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
+++ 
b/ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
@@ -20,24 +20,130 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions.gen;
 
 import java.sql.Timestamp;
 
-import org.apache.hadoop.hive.common.type.PisaTimestamp;
-
+import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.*;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 
 /**
- * Generated from template FilterTimestampColumnCompareScalar.txt, which covers comparison
- * expressions between a datetime/interval column and a scalar of the same type, however output is not
- * produced in a separate column.
+ * Generated from template FilterColumnCompareScalar.txt, which covers binary comparison
+ * expressions between a column and a scalar, however output is not produced in a separate column.
  * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
  */
-public class <ClassName> extends <BaseClassName> {
+public class <ClassName> extends VectorExpression {
+
+  private static final long serialVersionUID = 1L;
+
+  private int colNum;
+  private <HiveOperandType> value;
 
-  public <ClassName>(int colNum, Timestamp value) {
-super(colNum, new PisaTimestamp(value));
+  public <ClassName>(int colNum, <HiveOperandType> value) {
+this.colNum = colNum;
+this.value = value;
   }
 
   public <ClassName>() {
-super();
+  }
+
+  @Override
+  public void evaluate(VectorizedRowBatch batch) {
+
+if (childExpressions != null) {
+  super.evaluateChildren(batch);
+}
+
+ // Input #1 is type <HiveOperandType>.
+ <InputColumnVectorType> inputColVector1 = (<InputColumnVectorType>) batch.cols[colNum];
+
+int[] sel = batch.selected;
+boolean[] nullPos = inputColVector1.isNull;
+int n = batch.size;
+
+// return immediately if batch is empty
+if (n == 0) {
+  return;
+}
+
+if (inputColVector1.noNulls) {
+  if (inputColVector1.isRepeating) {
+//All must be selected otherwise size would be zero
+//Repeating property will not change.
+if (!(inputColVector1.compareTo(0, value) <OperatorSymbol> 0)) {
+  //Entire batch is filtered out.
+  batch.size = 0;
+}
+  } else if (batch.selectedInUse) {
+int newSize = 0;
+for(int j=0; j != n; j++) {
+  int i = sel[j];
+  if (inputColVector1.compareTo(i, value) <OperatorSymbol> 0) {
+sel[newSize++] = i;
+  }
+}
+batch.size = newSize;
+  } else {
+int newSize = 0;
+for(int i = 0; i != n; i++) {
+  if (inputColVector1.compareTo(i, value) <OperatorSymbol> 0) {
+sel[newSize++] = i;
+  }
+}
+if (newSize < n) {
+  batch.size = newSize;
+  batch.selectedInUse = true;
+}
+  }
+} else {
+  if (inputColVector1.isRepeating) {
+//All must be selected otherwise size would be zero
+//Repeating property will not change.
+if (!nullPos[0]) {
+  if (!(inputColVector1.compareTo(0, value) <OperatorSymbol> 0)) {
+//Entire batch is filtered out.
+batch.size = 0;
+  }
+} else {
+  batch.size = 0;
+}
+  } else if (batch.selectedInUse) {
+int newSize = 0;
+for(int j=0; j != n; j++) {
+  int i = sel[j];
+  if (!nullPos[i]) {
+   if (inputColVector1.compareTo(i, value) <OperatorSymbol> 0) {
+ sel[newSize++] = i;
+   }
+  }
+}
+//Change the selected vector
+batch.size = newSize;
+  } else {
+int newSize = 0;
+for(int i = 0; i != n; i++) {
+  if (!nullPos[i]) {
+if (inputColVector1.compareTo(i, value) <OperatorSymbol> 0) {
+  sel[newSize++] = i;
+}
+  }
+}
+if (newSize < n) {
+  batch.size = newSize;
+  batch.selectedInUse = true;
+}
+  }
+}
+  }
+
+  @Override
+  public int getOutputColumn() {
+return -1;
+  }
+
+  @Override
+  public String getOutputType() {
+return "boolean";
   }
 
   @Override
@@ -47,8 +153,8 @@ public class  extends  {
 VectorExpressionDescriptor.Mode.FILTER)
 .setNumArguments(2)
 .setArgumentTypes(
-

[03/50] [abbrv] hive git commit: HIVE-12616 : NullPointerException when spark session is reused to run a mapjoin (Nemon Lou, via Szehon)

2016-04-04 Thread jdere
HIVE-12616 : NullPointerException when spark session is reused to run a mapjoin 
(Nemon Lou, via Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d469e611
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d469e611
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d469e611

Branch: refs/heads/llap
Commit: d469e61108a1844fcc173674bfb2cd9f7ad01c18
Parents: 219d352
Author: Szehon Ho 
Authored: Thu Mar 24 11:12:08 2016 -0700
Committer: Szehon Ho 
Committed: Thu Mar 24 11:12:50 2016 -0700

--
 .../apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d469e611/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java
index 1798622..2427321 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java
@@ -28,6 +28,7 @@ import java.util.Set;
 
 import org.apache.commons.compress.utils.CharsetNames;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hive.ql.session.SessionState;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -120,6 +121,9 @@ public class HiveSparkClientFactory {
   sparkMaster = sparkConf.get("spark.master");
   hiveConf.set("spark.master", sparkMaster);
 }
+if (SessionState.get() != null && SessionState.get().getConf() != null) {
+  SessionState.get().getConf().set("spark.master", sparkMaster);
+}
 if (sparkMaster.equals("yarn-cluster")) {
   sparkConf.put("spark.yarn.maxAppAttempts", "1");
 }
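
For context, the null-guarded block added above can be read as the following standalone sketch: it copies the resolved spark.master into the session-level HiveConf so that a reused Spark session does not keep a stale or unset value. The class and method names here are illustrative only, not part of the committed patch.

import org.apache.hadoop.hive.ql.session.SessionState;

public final class SparkMasterSync {
  private SparkMasterSync() {}

  // Propagate the resolved master only when a session and its conf exist;
  // otherwise leave everything untouched.
  public static void propagateSparkMaster(String sparkMaster) {
    SessionState ss = SessionState.get();
    if (ss != null && ss.getConf() != null) {
      ss.getConf().set("spark.master", sparkMaster);
    }
  }
}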



[04/50] [abbrv] hive git commit: HIVE-13325: Excessive logging when ORC PPD fails type conversions (Prasanth Jayachandran reviewed by Gopal V)

2016-04-04 Thread jdere
HIVE-13325: Excessive logging when ORC PPD fails type conversions (Prasanth 
Jayachandran reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d3a5f20b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d3a5f20b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d3a5f20b

Branch: refs/heads/llap
Commit: d3a5f20b4487e241b3e9424d1d762dfca0c25d2f
Parents: d469e61
Author: Prasanth Jayachandran 
Authored: Thu Mar 24 13:30:55 2016 -0500
Committer: Prasanth Jayachandran 
Committed: Thu Mar 24 13:31:08 2016 -0500

--
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java  | 15 ---
 1 file changed, 12 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d3a5f20b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
index d511df6..aa835ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
@@ -378,9 +378,9 @@ public class RecordReaderImpl implements RecordReader {
 }
 
 TruthValue result;
+Object baseObj = predicate.getLiteral();
 try {
   // Predicate object and stats objects are converted to the type of the predicate object.
-  Object baseObj = predicate.getLiteral();
   Object minValue = getBaseObjectForComparison(predicate.getType(), min);
   Object maxValue = getBaseObjectForComparison(predicate.getType(), max);
   Object predObj = getBaseObjectForComparison(predicate.getType(), baseObj);
@@ -392,8 +392,17 @@ public class RecordReaderImpl implements RecordReader {
   // in case failed conversion, return the default YES_NO_NULL truth value
 } catch (Exception e) {
   if (LOG.isWarnEnabled()) {
-LOG.warn("Exception when evaluating predicate. Skipping ORC PPD." +
-" Exception: " + ExceptionUtils.getStackTrace(e));
+final String statsType = min == null ?
+(max == null ? "null" : max.getClass().getSimpleName()) :
+min.getClass().getSimpleName();
+final String predicateType = baseObj == null ? "null" : baseObj.getClass().getSimpleName();
+final String reason = e.getClass().getSimpleName() + " when evaluating predicate." +
+" Skipping ORC PPD." +
+" Exception: " + e.getMessage() +
+" StatsType: " + statsType +
+" PredicateType: " + predicateType;
+LOG.warn(reason);
+LOG.debug(reason, e);
   }
   if (predicate.getOperator().equals(PredicateLeaf.Operator.NULL_SAFE_EQUALS) || !hasNull) {
 result = TruthValue.YES_NO;
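
The point of the change above is the warn/debug split: the WARN line stays short and the full exception only appears when DEBUG logging is enabled, so repeated conversion failures no longer flood the logs with stack traces. A minimal sketch of the pattern, with illustrative class and method names:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PpdLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(PpdLoggingSketch.class);

  void logConversionFailure(Exception e, Object statsValue, Object predicateValue) {
    if (LOG.isWarnEnabled()) {
      String reason = e.getClass().getSimpleName() + " when evaluating predicate."
          + " Skipping ORC PPD."
          + " StatsType: " + (statsValue == null ? "null" : statsValue.getClass().getSimpleName())
          + " PredicateType: " + (predicateValue == null ? "null" : predicateValue.getClass().getSimpleName());
      LOG.warn(reason);       // concise, no stack trace
      LOG.debug(reason, e);   // full exception only at DEBUG
    }
  }
}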



[24/50] [abbrv] hive git commit: HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 (Matt McCline, reviewed by Jason Dere)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java
--
diff --git 
a/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java 
b/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java
index fdc64e7..305fdbe 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java
@@ -28,7 +28,6 @@ import java.text.SimpleDateFormat;
 import java.util.Date;
 
 import org.apache.hadoop.hive.common.type.HiveDecimal;
-import org.apache.hadoop.hive.common.type.PisaTimestamp;
 import org.apache.hadoop.hive.serde2.ByteStream.RandomAccessOutput;
 import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils;
 import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils.VInt;
@@ -323,7 +322,20 @@ public class TimestampWritable implements WritableComparable
 return seconds + nanos / 1000000000;
   }
 
+  public static long getLong(Timestamp timestamp) {
+return timestamp.getTime() / 1000;
+  }
 
+  /**
+  *
+  * @return double representation of the timestamp, accurate to nanoseconds
+  */
+ public static double getDouble(Timestamp timestamp) {
+   double seconds, nanos;
+   seconds = millisToSeconds(timestamp.getTime());
+   nanos = timestamp.getNanos();
+   return seconds + nanos / 1000000000;
+ }
 
   public void readFields(DataInput in) throws IOException {
 in.readFully(internalBytes, 0, 4);
@@ -543,6 +555,21 @@ public class TimestampWritable implements 
WritableComparable
 return t;
   }
 
+  public HiveDecimal getHiveDecimal() {
+if (timestampEmpty) {
+  populateTimestamp();
+}
+return getHiveDecimal(timestamp);
+  }
+
+  public static HiveDecimal getHiveDecimal(Timestamp timestamp) {
+// The BigDecimal class recommends not converting directly from double to BigDecimal,
+// so we convert through a string...
+Double timestampDouble = TimestampWritable.getDouble(timestamp);
+HiveDecimal result = HiveDecimal.create(timestampDouble.toString());
+return result;
+  }
+
   /**
* Converts the time in seconds or milliseconds to a timestamp.
* @param time time in seconds or in milliseconds
@@ -553,6 +580,17 @@ public class TimestampWritable implements 
WritableComparable
   return new Timestamp(intToTimestampInSeconds ?  time * 1000 : time);
   }
 
+  /**
+   * Converts the time in seconds or milliseconds to a timestamp.
+   * @param time time in seconds or in milliseconds
+   * @return the timestamp
+   */
+  public static void setTimestampFromLong(Timestamp timestamp, long time,
+  boolean intToTimestampInSeconds) {
+  // If the time is in seconds, converts it to milliseconds first.
+timestamp.setTime(intToTimestampInSeconds ?  time * 1000 : time);
+  }
+
   public static Timestamp doubleToTimestamp(double f) {
 long seconds = (long) f;
 
@@ -576,6 +614,37 @@ public class TimestampWritable implements 
WritableComparable
 return t;
   }
 
+  public static void setTimestampFromDouble(Timestamp timestamp, double f) {
+// Otherwise, BigDecimal throws an exception.  (Support vector operations that sometimes
+// do work on double Not-a-Number NaN values).
+if (Double.isNaN(f)) {
+  timestamp.setTime(0);
+  return;
+}
+// Algorithm used by TimestampWritable.doubleToTimestamp method.
+// Allocates a BigDecimal object!
+
+long seconds = (long) f;
+
+// We must ensure the exactness of the double's fractional portion.
+// 0.6 as the fraction part will be converted to 0.5... and
+// significantly reduce the savings from binary serialization
+BigDecimal bd = new BigDecimal(String.valueOf(f));
+bd = bd.subtract(new BigDecimal(seconds)).multiply(new BigDecimal(1000000000));
+int nanos = bd.intValue();
+
+// Convert to millis
+long millis = seconds * 1000;
+if (nanos < 0) {
+  millis -= 1000;
+  nanos += 1000000000;
+}
+timestamp.setTime(millis);
+
+// Set remaining fractional portion to nanos
+timestamp.setNanos(nanos);
+  }
+
   public static void setTimestamp(Timestamp t, byte[] bytes, int offset) {
 boolean hasDecimalOrSecondVInt = hasDecimalOrSecondVInt(bytes[offset]);
 long seconds = (long) TimestampWritable.getSeconds(bytes, offset);
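
As a reading aid for setTimestampFromDouble above: the integral part of the double becomes seconds and the fractional part is recovered exactly through BigDecimal, so the nanosecond field is not corrupted by binary floating-point error. A condensed, self-contained sketch of that decomposition (the class itself is illustrative, not the committed code):

import java.math.BigDecimal;
import java.sql.Timestamp;

public class DoubleToTimestampSketch {
  public static Timestamp fromDouble(double f) {
    long seconds = (long) f;
    // Recover the fractional part exactly via the string form of the double.
    BigDecimal bd = new BigDecimal(String.valueOf(f))
        .subtract(new BigDecimal(seconds))
        .multiply(new BigDecimal(1000000000));
    int nanos = bd.intValue();
    long millis = seconds * 1000L;
    if (nanos < 0) {          // e.g. f = -0.000000001
      millis -= 1000L;
      nanos += 1000000000;
    }
    Timestamp t = new Timestamp(millis);
    t.setNanos(nanos);        // nanos replaces the fractional part
    return t;
  }

  public static void main(String[] args) {
    System.out.println(fromDouble(0.5));   // half a second after the epoch
  }
}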

http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveIntervalDayTime.java
--
diff --git 
a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveIntervalDayTime.java
 
b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveIntervalDayTime.java
new file mode 100644
index 000..b891e27
--- /dev/null
+++ 
b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveIntervalDayTime.java
@@ -0,0 

[28/50] [abbrv] hive git commit: HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 (Matt McCline, reviewed by Jason Dere)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java
 
b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java
index 038e382..1e41fce 100644
--- 
a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java
+++ 
b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java
@@ -23,13 +23,19 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.UnsupportedEncodingException;
+import java.math.BigDecimal;
+import java.math.MathContext;
+import java.math.RoundingMode;
+import java.sql.Timestamp;
 import java.util.Arrays;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
 
 import junit.framework.Assert;
 
 import org.apache.hadoop.hive.common.type.Decimal128;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
-import org.apache.hadoop.hive.common.type.PisaTimestamp;
+import org.apache.hadoop.hive.common.type.RandomTypeUtil;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
@@ -39,6 +45,7 @@ import 
org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.*;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.*;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.typeinfo.HiveDecimalUtils;
 import org.junit.Test;
 
 /**
@@ -84,8 +91,8 @@ public class TestVectorTypeCasts {
 b.cols[0].noNulls = true;
 VectorExpression expr = new CastDoubleToTimestamp(0, 1);
 expr.evaluate(b);
-Assert.assertEquals(0.0, resultV.getTimestampSecondsWithFractionalNanos(3));
-Assert.assertEquals(0.5d, resultV.getTimestampSecondsWithFractionalNanos(4));
+Assert.assertEquals(0.0, TimestampWritable.getDouble(resultV.asScratchTimestamp(3)));
+Assert.assertEquals(0.5d, TimestampWritable.getDouble(resultV.asScratchTimestamp(4)));
   }
 
   @Test
@@ -103,39 +110,51 @@ public class TestVectorTypeCasts {
 
   @Test
   public void testCastLongToTimestamp() {
-VectorizedRowBatch b = TestVectorMathFunctions.getVectorizedRowBatchLongInTimestampOut();
+long[] longValues = new long[500];
+VectorizedRowBatch b = TestVectorMathFunctions.getVectorizedRowBatchLongInTimestampOut(longValues);
 TimestampColumnVector resultV = (TimestampColumnVector) b.cols[1];
 b.cols[0].noNulls = true;
 VectorExpression expr = new CastLongToTimestamp(0, 1);
 expr.evaluate(b);
-Assert.assertEquals(-2, resultV.getTimestampSeconds(0));
-Assert.assertEquals(2, resultV.getTimestampSeconds(1));
+for (int i = 0; i < longValues.length; i++) {
+  Timestamp timestamp = resultV.asScratchTimestamp(i);
+  long actual = TimestampWritable.getLong(timestamp);
+  assertEquals(actual, longValues[i]);
+}
   }
 
   @Test
   public void testCastTimestampToLong() {
-VectorizedRowBatch b = TestVectorMathFunctions.getVectorizedRowBatchTimestampInLongOut();
+long[] longValues = new long[500];
+VectorizedRowBatch b = TestVectorMathFunctions.getVectorizedRowBatchTimestampInLongOut(longValues);
 TimestampColumnVector inV = (TimestampColumnVector) b.cols[0];
-inV.set(0, new PisaTimestamp(0, PisaTimestamp.NANOSECONDS_PER_SECOND));  // Make one entry produce interesting result
-  // (1 sec after epoch).
-
 LongColumnVector resultV = (LongColumnVector) b.cols[1];
 b.cols[0].noNulls = true;
 VectorExpression expr = new CastTimestampToLong(0, 1);
 expr.evaluate(b);
-Assert.assertEquals(1, resultV.vector[0]);
+for (int i = 0; i < longValues.length; i++) {
+  long actual = resultV.vector[i];
+  long timestampLong = inV.getTimestampAsLong(i);
+  if (actual != timestampLong) {
+assertTrue(false);
+  }
+}
   }
 
   @Test
   public void testCastTimestampToDouble() {
-VectorizedRowBatch b = TestVectorMathFunctions.getVectorizedRowBatchTimestampInDoubleOut();
+double[] doubleValues = new double[500];
+VectorizedRowBatch b = TestVectorMathFunctions.getVectorizedRowBatchTimestampInDoubleOut(doubleValues);
 TimestampColumnVector inV = (TimestampColumnVector) b.cols[0];
 DoubleColumnVector resultV = (DoubleColumnVector) b.cols[1];
 b.cols[0].noNulls = true;
 VectorExpression expr = new CastTimestampToDouble(0, 1);
 expr.evaluate(b);
-Assert.assertEquals(-1E-9D , resultV.vector[1]);
-Assert.assertEquals(1E-9D, resultV.vector[3]);
+for (int i = 0; i < doubleValues.length; i++) {
+  double actual = resultV.vector[i];
+  

[11/50] [abbrv] hive git commit: HIVE-12367 : Lock/unlock database should add current database to inputs and outputs of authz hook (Dapeng Sun via Ashutosh Chauhan)

2016-04-04 Thread jdere
HIVE-12367 : Lock/unlock database should add current database to inputs and 
outputs of authz hook (Dapeng Sun via Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6bfec2e9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6bfec2e9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6bfec2e9

Branch: refs/heads/llap
Commit: 6bfec2e97c4e434646aa9aeffd98c9939313fa6e
Parents: a71edcf
Author: Dapeng Sun 
Authored: Tue Dec 15 19:39:00 2015 -0800
Committer: Ashutosh Chauhan 
Committed: Thu Mar 24 19:22:28 2016 -0700

--
 .../java/org/apache/hadoop/hive/ql/Driver.java  | 20 
 .../hive/ql/parse/DDLSemanticAnalyzer.java  | 15 +--
 .../clientnegative/dbtxnmgr_nodblock.q.out  |  2 ++
 .../clientnegative/dbtxnmgr_nodbunlock.q.out|  2 ++
 .../lockneg_query_tbl_in_locked_db.q.out|  6 ++
 .../lockneg_try_db_lock_conflict.q.out  |  6 ++
 .../lockneg_try_drop_locked_db.q.out|  4 
 .../lockneg_try_lock_db_in_use.q.out|  6 ++
 8 files changed, 59 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6bfec2e9/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java 
b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index d7e4ac7..7276e31 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1416,6 +1416,10 @@ public class Driver implements CommandProcessor {
 if (!checkConcurrency()) {
   return false;
 }
+// Lock operations themselves don't require the lock.
+if (isExplicitLockOperation()){
+  return false;
+}
 if (!HiveConf.getBoolVar(conf, ConfVars.HIVE_LOCK_MAPRED_ONLY)) {
   return true;
 }
@@ -1438,6 +1442,22 @@ public class Driver implements CommandProcessor {
 return false;
   }
 
+  private boolean isExplicitLockOperation() {
+HiveOperation currentOpt = plan.getOperation();
+if (currentOpt != null) {
+  switch (currentOpt) {
+  case LOCKDB:
+  case UNLOCKDB:
+  case LOCKTABLE:
+  case UNLOCKTABLE:
+return true;
+  default:
+return false;
+  }
+}
+return false;
+  }
+
   private CommandProcessorResponse createProcessorResponse(int ret) {
 queryDisplay.setErrorMessage(errorMessage);
 return new CommandProcessorResponse(ret, errorMessage, SQLState, downstreamError);

http://git-wip-us.apache.org/repos/asf/hive/blob/6bfec2e9/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 0c087ed..fe9b8cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -2444,8 +2444,12 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
 String dbName = unescapeIdentifier(ast.getChild(0).getText());
 String mode  = unescapeIdentifier(ast.getChild(1).getText().toUpperCase());
 
-//inputs.add(new ReadEntity(dbName));
-//outputs.add(new WriteEntity(dbName));
+inputs.add(new ReadEntity(getDatabase(dbName)));
+// Lock database operation is to acquire the lock explicitly, the operation
+// itself doesn't need to be locked. Set the WriteEntity as WriteType:
+// DDL_NO_LOCK here, otherwise it will conflict with Hive's transaction.
+outputs.add(new WriteEntity(getDatabase(dbName), WriteType.DDL_NO_LOCK));
+
 LockDatabaseDesc lockDatabaseDesc = new LockDatabaseDesc(dbName, mode,
 HiveConf.getVar(conf, ConfVars.HIVEQUERYID));
 lockDatabaseDesc.setQueryStr(ctx.getCmd());
@@ -2457,6 +2461,13 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
   private void analyzeUnlockDatabase(ASTNode ast) throws SemanticException {
 String dbName = unescapeIdentifier(ast.getChild(0).getText());
 
+inputs.add(new ReadEntity(getDatabase(dbName)));
+// Unlock database operation is to release the lock explicitly, the
+// operation itself don't need to be locked. Set the WriteEntity as
+// WriteType: DDL_NO_LOCK here, otherwise it will conflict with
+// Hive's transaction.
+outputs.add(new WriteEntity(getDatabase(dbName), WriteType.DDL_NO_LOCK));
+
 UnlockDatabaseDesc unlockDatabaseDesc = new UnlockDatabaseDesc(dbName);
 DDLWork work = new DDLWork(getInputs(), getOutputs(), unlockDatabaseDesc);

[46/50] [abbrv] hive git commit: HIVE-13367: Extending HPLSQL parser (Dmitry Tolpeko reviewed by Alan Gates)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/39d66a43/hplsql/src/test/queries/db/create_procedure_no_params.sql
--
diff --git a/hplsql/src/test/queries/db/create_procedure_no_params.sql 
b/hplsql/src/test/queries/db/create_procedure_no_params.sql
new file mode 100644
index 000..bbacb29
--- /dev/null
+++ b/hplsql/src/test/queries/db/create_procedure_no_params.sql
@@ -0,0 +1,25 @@
+CREATE OR REPLACE PROCEDURE show_the_date
+IS
+today DATE DEFAULT DATE '2016-03-10';
+BEGIN
+-- Display the date.
+DBMS_OUTPUT.PUT_LINE ('Today is ' || today);
+END show_the_date;
+
+CREATE OR REPLACE PROCEDURE show_the_date2()
+IS
+today DATE DEFAULT DATE '2016-03-10';
+BEGIN
+-- Display the date.
+DBMS_OUTPUT.PUT_LINE ('Today is ' || today);
+END show_the_date2;
+
+call show_the_date;
+call show_the_date2;
+
+DECLARE
+today DATE DEFAULT DATE '2016-03-10';
+BEGIN
+-- Display the date.
+DBMS_OUTPUT.PUT_LINE ('Today is ' || today);
+END;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/39d66a43/hplsql/src/test/queries/db/describe.sql
--
diff --git a/hplsql/src/test/queries/db/describe.sql 
b/hplsql/src/test/queries/db/describe.sql
new file mode 100644
index 000..f87b532
--- /dev/null
+++ b/hplsql/src/test/queries/db/describe.sql
@@ -0,0 +1,3 @@
+describe src;
+desc src;
+desc table src;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/39d66a43/hplsql/src/test/queries/db/execute.sql
--
diff --git a/hplsql/src/test/queries/db/execute.sql 
b/hplsql/src/test/queries/db/execute.sql
new file mode 100644
index 000..abc23c4
--- /dev/null
+++ b/hplsql/src/test/queries/db/execute.sql
@@ -0,0 +1,7 @@
+DECLARE cnt INT;
+EXECUTE 'SELECT COUNT(*) FROM src' INTO cnt;
+PRINT cnt;
+
+DECLARE sql STRING = 'SELECT COUNT(*) FROM src';
+EXECUTE sql INTO cnt;
+PRINT cnt;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/39d66a43/hplsql/src/test/queries/db/expression.sql
--
diff --git a/hplsql/src/test/queries/db/expression.sql 
b/hplsql/src/test/queries/db/expression.sql
new file mode 100644
index 000..cd82e7a
--- /dev/null
+++ b/hplsql/src/test/queries/db/expression.sql
@@ -0,0 +1 @@
+select 0.12*0.14, 0.13 / 0.11, 1 + 2, 1 - 2, (1-2)+3, 3 * (2-1) from src limit 1;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/39d66a43/hplsql/src/test/queries/db/for.sql
--
diff --git a/hplsql/src/test/queries/db/for.sql 
b/hplsql/src/test/queries/db/for.sql
new file mode 100644
index 000..19dac9a
--- /dev/null
+++ b/hplsql/src/test/queries/db/for.sql
@@ -0,0 +1 @@
+for item in (select code from sample_07 limit 10  ) loop print(cast(item.code as varchar2(100))+' aa') end loop;

http://git-wip-us.apache.org/repos/asf/hive/blob/39d66a43/hplsql/src/test/queries/db/insert.sql
--
diff --git a/hplsql/src/test/queries/db/insert.sql 
b/hplsql/src/test/queries/db/insert.sql
new file mode 100644
index 000..b67796a
--- /dev/null
+++ b/hplsql/src/test/queries/db/insert.sql
@@ -0,0 +1,3 @@
+insert overwrite table src_insert select value from src;
+
+insert into table src_insert select value from src;

http://git-wip-us.apache.org/repos/asf/hive/blob/39d66a43/hplsql/src/test/queries/db/insert_directory.sql
--
diff --git a/hplsql/src/test/queries/db/insert_directory.sql 
b/hplsql/src/test/queries/db/insert_directory.sql
new file mode 100644
index 000..b9a58d1
--- /dev/null
+++ b/hplsql/src/test/queries/db/insert_directory.sql
@@ -0,0 +1,12 @@
+insert overwrite directory /tmp/src1
+  select * from src;
+  
+insert overwrite local directory /tmp/src2
+  select * from src;
+  
+insert overwrite local directory '/tmp/src3'
+  'select * from ' || 'src';
+  
+declare tabname string = 'src';
+insert overwrite local directory '/tmp/src_' || date '2016-03-28' 
+  'select * from ' || tabname;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/39d66a43/hplsql/src/test/queries/db/schema.sql
--
diff --git a/hplsql/src/test/queries/db/schema.sql 
b/hplsql/src/test/queries/db/schema.sql
index 0c41569..e003934 100644
--- a/hplsql/src/test/queries/db/schema.sql
+++ b/hplsql/src/test/queries/db/schema.sql
@@ -29,4 +29,12 @@ select
   cast(key as double)/10 c10,
   date '2015-09-07' c11,
   cast(date '2015-09-07' as timestamp) c12
-from src;
\ No newline at end of file
+from src;
+
+create table if not exists src_empty (
+  c1 string)
+;
+
+create table if not exists src_insert (
+  c1 string)

[10/50] [abbrv] hive git commit: HIVE-9499 : hive.limit.query.max.table.partition makes queries fail on non-partitioned tables (Navis via Ashutosh Chauhan)

2016-04-04 Thread jdere
HIVE-9499 : hive.limit.query.max.table.partition makes queries fail on 
non-partitioned tables (Navis via Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a71edcf6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a71edcf6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a71edcf6

Branch: refs/heads/llap
Commit: a71edcf6a5672452a8e00c2bad4f20cffced26d9
Parents: 3b6b56d
Author: Navis Ryu 
Authored: Sun Feb 8 17:57:00 2015 -0800
Committer: Ashutosh Chauhan 
Committed: Thu Mar 24 19:09:47 2016 -0700

--
 .../ql/optimizer/stats/annotation/StatsRulesProcFactory.java | 3 +--
 .../java/org/apache/hadoop/hive/ql/parse/ParseContext.java   | 5 +
 .../org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java| 8 ++--
 3 files changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a71edcf6/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index 4bcf6bf..c4fc5ca 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -105,8 +105,7 @@ public class StatsRulesProcFactory {
 Object... nodeOutputs) throws SemanticException {
   TableScanOperator tsop = (TableScanOperator) nd;
   AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx;
-  PrunedPartitionList partList =
-  aspCtx.getParseContext().getPrunedPartitions(tsop.getName(), tsop);
+  PrunedPartitionList partList = 
aspCtx.getParseContext().getPrunedPartitions(tsop);
   Table table = tsop.getConf().getTableMetadata();
 
   try {

http://git-wip-us.apache.org/repos/asf/hive/blob/a71edcf6/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 4f784d1..95c254c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -466,6 +466,11 @@ public class ParseContext {
 this.fetchTask = fetchTask;
   }
 
+  public PrunedPartitionList getPrunedPartitions(TableScanOperator ts)
+  throws SemanticException {
+return getPrunedPartitions(ts.getConf().getAlias(), ts);
+  }
+
   public PrunedPartitionList getPrunedPartitions(String alias, 
TableScanOperator ts)
   throws SemanticException {
 PrunedPartitionList partsList = opToPartList.get(ts);

http://git-wip-us.apache.org/repos/asf/hive/blob/a71edcf6/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index d9db1d5..adee14b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -10776,10 +10776,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 // check whether any of them break the limit
 for (Operator topOp : topOps.values()) {
   if (topOp instanceof TableScanOperator) {
-if (((TableScanDesc)topOp.getConf()).getIsMetadataOnly()) {
+TableScanOperator tsOp = (TableScanOperator) topOp;
+if (tsOp.getConf().getIsMetadataOnly()) {
+  continue;
+}
+PrunedPartitionList parts = pCtx.getPrunedPartitions(tsOp);
+if (!parts.getSourceTable().isPartitioned()) {
   continue;
 }
-PrunedPartitionList parts = pCtx.getOpToPartList().get(topOp);
 if (parts.getPartitions().size() > scanLimit) {
+  throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, ""
+  + parts.getPartitions().size(), "" + parts.getSourceTable().getTableName(), ""



[30/50] [abbrv] hive git commit: HIVE-13111: Fix timestamp / interval_day_time wrong results with HIVE-9862 (Matt McCline, reviewed by Jason Dere)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToTimestamp.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToTimestamp.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToTimestamp.java
index 39823fe..31d2f78 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToTimestamp.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastDoubleToTimestamp.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.*;
+import org.apache.hadoop.hive.serde2.io.TimestampWritable;
 
 public class CastDoubleToTimestamp extends VectorExpression {
   private static final long serialVersionUID = 1L;
@@ -37,9 +38,11 @@ public class CastDoubleToTimestamp extends VectorExpression {
 super();
   }
 
-  private void setSecondsWithFractionalNanoseconds(TimestampColumnVector timestampColVector,
+  private void setDouble(TimestampColumnVector timestampColVector,
   double[] vector, int elementNum) {
-
timestampColVector.setTimestampSecondsWithFractionalNanoseconds(elementNum, 
vector[elementNum]);
+TimestampWritable.setTimestampFromDouble(
+timestampColVector.getScratchTimestamp(), vector[elementNum]);
+timestampColVector.setFromScratchTimestamp(elementNum);
   }
 
   @Override
@@ -66,7 +69,7 @@ public class CastDoubleToTimestamp extends VectorExpression {
 if (inputColVector.isRepeating) {
   //All must be selected otherwise size would be zero
   //Repeating property will not change.
-  setSecondsWithFractionalNanoseconds(outputColVector, vector, 0);
+  setDouble(outputColVector, vector, 0);
+  // Even if there are no nulls, we always copy over entry 0. Simplifies code.
   outputIsNull[0] = inputIsNull[0];
   outputColVector.isRepeating = true;
@@ -74,11 +77,11 @@ public class CastDoubleToTimestamp extends VectorExpression 
{
   if (batch.selectedInUse) {
 for(int j = 0; j != n; j++) {
   int i = sel[j];
-  setSecondsWithFractionalNanoseconds(outputColVector, vector, i);
+  setDouble(outputColVector, vector, i);
 }
   } else {
 for(int i = 0; i != n; i++) {
-  setSecondsWithFractionalNanoseconds(outputColVector, vector, i);
+  setDouble(outputColVector, vector, i);
 }
   }
   outputColVector.isRepeating = false;
@@ -86,12 +89,12 @@ public class CastDoubleToTimestamp extends VectorExpression 
{
   if (batch.selectedInUse) {
 for(int j = 0; j != n; j++) {
   int i = sel[j];
-  setSecondsWithFractionalNanoseconds(outputColVector, vector, i);
+  setDouble(outputColVector, vector, i);
   outputIsNull[i] = inputIsNull[i];
 }
   } else {
 for(int i = 0; i != n; i++) {
-  setSecondsWithFractionalNanoseconds(outputColVector, vector, i);
+  setDouble(outputColVector, vector, i);
 }
 System.arraycopy(inputIsNull, 0, outputIsNull, 0, n);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/52016296/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToTimestamp.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToTimestamp.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToTimestamp.java
index d344d4d..a2ee52d 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToTimestamp.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CastLongToTimestamp.java
@@ -20,8 +20,7 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.*;
-import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
+import org.apache.hadoop.hive.serde2.io.TimestampWritable;
 
 public class CastLongToTimestamp extends VectorExpression {
   private static final long serialVersionUID = 1L;
@@ -40,7 +39,10 @@ public class CastLongToTimestamp extends VectorExpression {
   }
 
   private void setSeconds(TimestampColumnVector timestampColVector, long[] vector, int elementNum) {
-timestampColVector.setTimestampSeconds(elementNum, vector[elementNum]);
+TimestampWritable.setTimestampFromLong(
+timestampColVector.getScratchTimestamp(), vector[elementNum],
+/* intToTimestampInSeconds */ true);
+timestampColVector.setFromScratchTimestamp(elementNum);
   }
 
   @Override


[16/50] [abbrv] hive git commit: HIVE-12653 : The property "serialization.encoding" in the class "org.apache.hadoop.hive.contrib.serde2.MultiDelimitSerDe" does not work (yangfeng via Ashutosh Chauhan)

2016-04-04 Thread jdere
HIVE-12653 : The property  "serialization.encoding" in the class 
"org.apache.hadoop.hive.contrib.serde2.MultiDelimitSerDe" does not work 
(yangfeng via Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2449d1df
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2449d1df
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2449d1df

Branch: refs/heads/llap
Commit: 2449d1dfe9429363a9458d2004ec2405f5aa9035
Parents: e384b2b
Author: yangfang 
Authored: Mon Dec 14 03:13:00 2015 -0800
Committer: Ashutosh Chauhan 
Committed: Fri Mar 25 07:43:05 2016 -0700

--
 .../hive/contrib/serde2/MultiDelimitSerDe.java  | 23 +++-
 1 file changed, 18 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2449d1df/contrib/src/java/org/apache/hadoop/hive/contrib/serde2/MultiDelimitSerDe.java
--
diff --git 
a/contrib/src/java/org/apache/hadoop/hive/contrib/serde2/MultiDelimitSerDe.java 
b/contrib/src/java/org/apache/hadoop/hive/contrib/serde2/MultiDelimitSerDe.java
index 9a162d5..296c449 100644
--- 
a/contrib/src/java/org/apache/hadoop/hive/contrib/serde2/MultiDelimitSerDe.java
+++ 
b/contrib/src/java/org/apache/hadoop/hive/contrib/serde2/MultiDelimitSerDe.java
@@ -63,7 +63,7 @@ import org.apache.hadoop.io.Writable;
 serdeConstants.SERIALIZATION_ENCODING,
 LazySerDeParameters.SERIALIZATION_EXTEND_NESTING_LEVELS,
 LazySerDeParameters.SERIALIZATION_EXTEND_ADDITIONAL_NESTING_LEVELS})
-public class MultiDelimitSerDe extends AbstractSerDe {
+public class MultiDelimitSerDe extends AbstractEncodingAwareSerDe {
 
   private static final byte[] DEFAULT_SEPARATORS = {(byte) 1, (byte) 2, (byte) 3};
   // Due to HIVE-6404, define our own constant
@@ -94,6 +94,7 @@ public class MultiDelimitSerDe extends AbstractSerDe {
   @Override
   public void initialize(Configuration conf, Properties tbl) throws SerDeException {
 // get the SerDe parameters
+super.initialize(conf, tbl);
 serdeParams = new LazySerDeParameters(conf, tbl, getClass().getName());
 
 fieldDelimited = tbl.getProperty(serdeConstants.FIELD_DELIM);
@@ -134,8 +135,9 @@ public class MultiDelimitSerDe extends AbstractSerDe {
 return Text.class;
   }
 
-  @Override
-  public Object deserialize(Writable blob) throws SerDeException {
+
+  @Override 
+  public Object doDeserialize(Writable blob) throws SerDeException {
 if (byteArrayRef == null) {
   byteArrayRef = new ByteArrayRef();
 }
@@ -159,8 +161,9 @@ public class MultiDelimitSerDe extends AbstractSerDe {
 return cachedLazyStruct;
   }
 
-  @Override
-  public Writable serialize(Object obj, ObjectInspector objInspector) throws SerDeException {
+  @Override 
+  public Writable doSerialize(Object obj, ObjectInspector objInspector)
+  throws SerDeException {
 StructObjectInspector soi = (StructObjectInspector) objInspector;
 List<? extends StructField> fields = soi.getAllStructFieldRefs();
 List<Object> list = soi.getStructFieldsDataAsList(obj);
@@ -286,6 +289,16 @@ public class MultiDelimitSerDe extends AbstractSerDe {
 throw new RuntimeException("Unknown category type: "+ objInspector.getCategory());
   }
 
+  protected Text transformFromUTF8(Writable blob) {
+Text text = (Text)blob;
+return SerDeUtils.transformTextFromUTF8(text, this.charset);
+  }
+
+  protected Text transformToUTF8(Writable blob) {
+Text text = (Text) blob;
+return SerDeUtils.transformTextToUTF8(text, this.charset);
+  }
+
   @Override
   public SerDeStats getSerDeStats() {
 // no support for statistics
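
The substance of the change is that the serde now inherits the encoding-aware round trip: bytes arriving in the table's "serialization.encoding" charset are transcoded to UTF-8 before doDeserialize, and transcoded back on doSerialize. A rough standalone sketch of that round trip, with hypothetical method names (the real work is done by SerDeUtils in the patch):

import java.nio.charset.Charset;
import org.apache.hadoop.io.Text;

public class EncodingRoundTripSketch {
  // Table charset -> UTF-8, applied before deserialization.
  public static Text toUtf8(Text blob, Charset charset) {
    if ("UTF-8".equals(charset.name())) {
      return blob;
    }
    String decoded = new String(blob.getBytes(), 0, blob.getLength(), charset);
    return new Text(decoded);               // Text stores UTF-8 internally
  }

  // UTF-8 -> table charset, applied after serialization.
  public static Text fromUtf8(Text utf8, Charset charset) {
    if ("UTF-8".equals(charset.name())) {
      return utf8;
    }
    byte[] reencoded = utf8.toString().getBytes(charset);
    Text out = new Text();
    out.set(reencoded, 0, reencoded.length);
    return out;
  }
}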



[43/50] [abbrv] hive git commit: HIVE-13303 : spill to YARN directories, not tmp, when available (Sergey Shelukhin, reviewed by Gopal V)

2016-04-04 Thread jdere
HIVE-13303 : spill to YARN directories, not tmp, when available (Sergey 
Shelukhin, reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/20a8192a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/20a8192a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/20a8192a

Branch: refs/heads/llap
Commit: 20a8192a2b8f36da5ef2d5d61d77de1e70188b1d
Parents: 56b6459
Author: Sergey Shelukhin 
Authored: Tue Mar 29 18:57:27 2016 -0700
Committer: Sergey Shelukhin 
Committed: Tue Mar 29 18:57:27 2016 -0700

--
 .../apache/hadoop/hive/common/FileUtils.java| 54 
 .../hadoop/hive/llap/io/api/LlapProxy.java  |  2 +
 .../org/apache/hadoop/hive/llap/LlapUtil.java   | 26 ++
 .../hive/llap/daemon/impl/LlapDaemon.java   |  6 +--
 .../persistence/HybridHashTableContainer.java   | 40 ++-
 .../ql/exec/persistence/KeyValueContainer.java  | 25 +
 .../ql/exec/persistence/ObjectContainer.java| 24 -
 .../hive/ql/exec/persistence/RowContainer.java  | 34 ++--
 .../hadoop/hive/ql/exec/tez/DagUtils.java   |  1 +
 .../mapjoin/VectorMapJoinRowBytesContainer.java | 24 -
 .../ql/exec/persistence/TestHashPartition.java  |  3 +-
 .../TestVectorMapJoinRowBytesContainer.java |  3 +-
 12 files changed, 169 insertions(+), 73 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/20a8192a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java 
b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index 8c9bd3d..51340d8 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -27,6 +27,7 @@ import java.security.AccessControlException;
 import java.security.PrivilegedExceptionAction;
 import java.util.BitSet;
 import java.util.List;
+import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hive.common.util.ShutdownHookManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -53,6 +55,7 @@ import org.slf4j.LoggerFactory;
  */
 public final class FileUtils {
   private static final Logger LOG = 
LoggerFactory.getLogger(FileUtils.class.getName());
+  private static final Random random = new Random();
 
   public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
 @Override
@@ -827,6 +830,57 @@ public final class FileUtils {
 return tmpFile;
   }
 
+  public static File createLocalDirsTempFile(String localDirList, String prefix, String suffix,
+  boolean isDirectory) throws IOException {
+if (localDirList == null || localDirList.isEmpty()) {
+  return createFileInTmp(prefix, suffix, "Local directories not specified", isDirectory);
+}
+String[] localDirs = StringUtils.getTrimmedStrings(localDirList);
+if (localDirs.length == 0) {
+  return createFileInTmp(prefix, suffix, "Local directories not specified", isDirectory);
+}
+// TODO: we could stagger these to threads by ID, but that can also lead to bad effects.
+String path = localDirs[random.nextInt(localDirs.length)];
+if (path == null || path.isEmpty()) {
+  return createFileInTmp(prefix, suffix, "Empty path for one of the local dirs", isDirectory);
+}
+File targetDir = new File(path);
+if (!targetDir.exists() && !targetDir.mkdirs()) {
+  return createFileInTmp(prefix, suffix, "Cannot access or create " + targetDir, isDirectory);
+}
+try {
+  File file = File.createTempFile(prefix, suffix, targetDir);
+  if (isDirectory && (!file.delete() || !file.mkdirs())) {
+// TODO: or we could just generate a name ourselves and not do this?
+return createFileInTmp(prefix, suffix,
+"Cannot recreate " + file + " as directory", isDirectory);
+  }
+  file.deleteOnExit();
+  return file;
+} catch (IOException ex) {
+  LOG.error("Error creating a file in " + targetDir, ex);
+  return createFileInTmp(prefix, suffix, "Cannot create a file in " + targetDir, isDirectory);
+}
+  }
+
+  private static File createFileInTmp(String prefix, String suffix,
+  String reason, boolean isDirectory) throws IOException {
+File file = File.createTempFile(prefix, suffix);
+if (isDirectory && (!file.delete() || 
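
The intent of createLocalDirsTempFile above: prefer one of the YARN-provided local directories (picked at random to spread spill load across disks) and fall back to java.io.tmpdir when the list is missing or unusable. A condensed sketch of that selection logic, with illustrative names:

import java.io.File;
import java.io.IOException;
import java.util.Random;

public class SpillDirSketch {
  private static final Random RANDOM = new Random();

  public static File createSpillFile(String localDirList, String prefix, String suffix)
      throws IOException {
    if (localDirList != null && !localDirList.isEmpty()) {
      String[] dirs = localDirList.split(",");
      File dir = new File(dirs[RANDOM.nextInt(dirs.length)].trim());
      if (dir.isDirectory() || dir.mkdirs()) {
        File f = File.createTempFile(prefix, suffix, dir);
        f.deleteOnExit();
        return f;
      }
    }
    // Fallback: plain temp file under java.io.tmpdir.
    return File.createTempFile(prefix, suffix);
  }
}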

[40/50] [abbrv] hive git commit: HIVE-12937 : DbNotificationListener unable to clean up old notification events (Sushanth Sowmyan, reviewed by Sergey Shelukhin)

2016-04-04 Thread jdere
HIVE-12937 : DbNotificationListener unable to clean up old notification events 
(Sushanth Sowmyan, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1de97bc5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1de97bc5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1de97bc5

Branch: refs/heads/llap
Commit: 1de97bc5fad323ae3bd48ebb39e6e68a3581e099
Parents: 8c8ff3f
Author: Sushanth Sowmyan 
Authored: Tue Mar 29 11:21:23 2016 -0700
Committer: Sushanth Sowmyan 
Committed: Tue Mar 29 11:24:36 2016 -0700

--
 .../listener/TestDbNotificationListener.java  | 18 ++
 .../apache/hadoop/hive/metastore/ObjectStore.java |  2 +-
 2 files changed, 19 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1de97bc5/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
--
diff --git 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
index 6caf3fe..1360563 100644
--- 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
+++ 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
@@ -57,6 +57,7 @@ import java.util.Map;
 public class TestDbNotificationListener {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(TestDbNotificationListener.class.getName());
+  private static final int EVENTS_TTL = 30;
  private static Map<String, String> emptyParameters = new HashMap<String, String>();
   private static IMetaStoreClient msClient;
   private static Driver driver;
@@ -68,6 +69,7 @@ public class TestDbNotificationListener {
 HiveConf conf = new HiveConf();
 conf.setVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
 DbNotificationListener.class.getName());
+conf.setVar(HiveConf.ConfVars.METASTORE_EVENT_DB_LISTENER_TTL, String.valueOf(EVENTS_TTL)+"s");
 conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
 conf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true);
 conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
@@ -565,4 +567,20 @@ public class TestDbNotificationListener {
 assertEquals(firstEventId + 19, event.getEventId());
 assertEquals(HCatConstants.HCAT_DROP_PARTITION_EVENT, event.getEventType());
   }
+
+  @Test
+  public void cleanupNotifs() throws Exception {
+Database db = new Database("cleanup1","no description","file:/tmp", 
emptyParameters);
+msClient.createDatabase(db);
+msClient.dropDatabase("cleanup1");
+
+NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 
0, null);
+assertEquals(2, rsp.getEventsSize());
+
+// sleep for expiry time, and then fetch again
+Thread.sleep(EVENTS_TTL * 2 * 1000); // sleep twice the TTL interval - 
things should have been cleaned by then.
+
+NotificationEventResponse rsp2 = 
msClient.getNextNotification(firstEventId, 0, null);
+assertEquals(0, rsp2.getEventsSize());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/1de97bc5/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 35adb39..ac293b9 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -7827,7 +7827,7 @@ public class ObjectStore implements RawStore, Configurable {
   query.declareParameters("java.lang.Integer tooOld");
   Collection toBeRemoved = (Collection) query.execute(tooOld);
   if (toBeRemoved != null && toBeRemoved.size() > 0) {
   if (toBeRemoved != null && toBeRemoved.size() > 0) {
-pm.deletePersistent(toBeRemoved);
+pm.deletePersistentAll(toBeRemoved);
   }
   commited = commitTransaction();
 } finally {
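
The one-word fix above matters because Query.execute() returns a Collection: JDO's deletePersistent() expects a single persistent instance, so handing it the whole collection never removed the expired events, while deletePersistentAll() performs the bulk delete. A minimal sketch of the corrected cleanup (variable names illustrative):

import java.util.Collection;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;

public class NotificationCleanupSketch {
  static void cleanOldEvents(PersistenceManager pm, Query query, int tooOld) {
    query.declareParameters("java.lang.Integer tooOld");
    Collection<?> toBeRemoved = (Collection<?>) query.execute(tooOld);
    if (toBeRemoved != null && !toBeRemoved.isEmpty()) {
      pm.deletePersistentAll(toBeRemoved);   // bulk delete, not deletePersistent()
    }
  }
}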



[36/50] [abbrv] hive git commit: HIVE-13371: Fix test failure of testHasNull in TestColumnStatistics running on Windows (PPengcheng Xiong, reviewed by Ashutosh Chauhan)

2016-04-04 Thread jdere
HIVE-13371: Fix test failure of testHasNull in TestColumnStatistics running on 
Windows (PPengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/44ab4553
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/44ab4553
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/44ab4553

Branch: refs/heads/llap
Commit: 44ab45534277920bcf64dbd43409ab730fdb8d61
Parents: 5201629
Author: Pengcheng Xiong 
Authored: Mon Mar 28 16:04:45 2016 -0700
Committer: Pengcheng Xiong 
Committed: Mon Mar 28 16:04:45 2016 -0700

--
 .../org/apache/hadoop/hive/ql/io/orc/TestColumnStatistics.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/44ab4553/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestColumnStatistics.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestColumnStatistics.java 
b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestColumnStatistics.java
index 9433283..5f0146f 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestColumnStatistics.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestColumnStatistics.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.io.orc;
 
 import static junit.framework.Assert.assertEquals;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -343,7 +344,9 @@ public class TestColumnStatistics {
 FileDump.main(new String[]{testFilePath.toString(), "--rowindex=2"});
 System.out.flush();
 System.setOut(origOut);
-
+// If called with an expression evaluating to false, the test will halt
+// and be ignored.
+assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
 TestFileDump.checkOutput(outputFilename, workDir + File.separator + outputFilename);
   }
 }
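
For readers unfamiliar with the JUnit idiom used here: assumeTrue() marks a test as skipped (not failed) when its condition is false, which is how the output comparison is bypassed on Windows. A tiny illustrative test, not part of the patch:

import static org.junit.Assume.assumeTrue;
import org.junit.Test;

public class AssumeSketch {
  @Test
  public void onlyOnNonWindows() {
    assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
    // assertions that depend on POSIX-style paths would go here
  }
}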



[08/50] [abbrv] hive git commit: HIVE-13262: LLAP: Remove log levels from DebugUtils (Prasanth Jayachandran reviewed by Sergey Shelukhin)

2016-04-04 Thread jdere
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6b56d7/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index 29b51ec..f4cfa53 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hive.ql.io.orc.RecordReaderUtils;
 import org.apache.orc.impl.StreamName;
 import org.apache.orc.StripeInformation;
 import org.apache.orc.impl.BufferChunk;
-import 
org.apache.hadoop.hive.ql.io.orc.RecordReaderUtils.ByteBufferAllocatorPool;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.OrcEncodedColumnBatch;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.PoolFactory;
 import org.apache.orc.OrcProto;
@@ -103,8 +102,7 @@ class EncodedReaderImpl implements EncodedReader {
  private final List<OrcProto.Type> types;
   private final long rowIndexStride;
   private final DataCache cache;
-  private ByteBufferAllocatorPool pool;
-  private boolean isDebugTracingEnabled;
+  private boolean isTracingEnabled;
 
   public EncodedReaderImpl(Object fileKey, List<OrcProto.Type> types, CompressionCodec codec,
   int bufferSize, long strideRate, DataCache cache, DataReader dataReader, PoolFactory pf)
@@ -209,8 +207,8 @@ class EncodedReaderImpl implements EncodedReader {
 long offset = 0; // Stream offset in relation to the stripe.
 // 1.1. Figure out which columns have a present stream
 boolean[] hasNull = RecordReaderUtils.findPresentStreamsByColumn(streamList, types);
-if (isDebugTracingEnabled) {
-  LOG.info("The following columns have PRESENT streams: " + arrayToString(hasNull));
+if (isTracingEnabled) {
+  LOG.trace("The following columns have PRESENT streams: " + arrayToString(hasNull));
 }
 
 // We assume stream list is sorted by column and that non-data
@@ -230,8 +228,8 @@ class EncodedReaderImpl implements EncodedReader {
 // We have a stream for included column, but in future it might have 
no data streams.
 // It's more like "has at least one column included that has an index 
stream".
 hasIndexOnlyCols = hasIndexOnlyCols | included[colIx];
-if (isDebugTracingEnabled) {
-  LOG.info("Skipping stream: " + streamKind + " at " + offset + ", " + 
length);
+if (isTracingEnabled) {
+  LOG.trace("Skipping stream: " + streamKind + " at " + offset + ", " 
+ length);
 }
 offset += length;
 continue;
@@ -244,8 +242,8 @@ class EncodedReaderImpl implements EncodedReader {
 includedRgs = colRgs[colRgIx];
 ctx = colCtxs[colRgIx] = new ColumnReadContext(
 colIx, encodings.get(colIx), indexes[colIx]);
-if (isDebugTracingEnabled) {
-  LOG.info("Creating context " + colRgIx + " for column " + colIx + 
":" + ctx.toString());
+if (isTracingEnabled) {
+  LOG.trace("Creating context " + colRgIx + " for column " + colIx + 
":" + ctx.toString());
 }
   } else {
 ctx = colCtxs[colRgIx];
@@ -254,14 +252,14 @@ class EncodedReaderImpl implements EncodedReader {
   int indexIx = RecordReaderUtils.getIndexPosition(ctx.encoding.getKind(),
   types.get(colIx).getKind(), streamKind, isCompressed, 
hasNull[colIx]);
   ctx.addStream(offset, stream, indexIx);
-  if (isDebugTracingEnabled) {
-LOG.info("Adding stream for column " + colIx + ": " + streamKind + " 
at " + offset
+  if (isTracingEnabled) {
+LOG.trace("Adding stream for column " + colIx + ": " + streamKind + " 
at " + offset
 + ", " + length + ", index position " + indexIx);
   }
   if (includedRgs == null || RecordReaderUtils.isDictionary(streamKind, 
encodings.get(colIx))) {
 RecordReaderUtils.addEntireStreamToRanges(offset, length, listToRead, 
true);
-if (isDebugTracingEnabled) {
-  LOG.info("Will read whole stream " + streamKind + "; added to " + 
listToRead.getTail());
+if (isTracingEnabled) {
+  LOG.trace("Will read whole stream " + streamKind + "; added to " + 
listToRead.getTail());
 }
   } else {
 RecordReaderUtils.addRgFilteredStreamToRanges(stream, includedRgs,
@@ -287,15 +285,15 @@ class EncodedReaderImpl implements EncodedReader {
 
 // 2. Now, read all of the ranges from cache or disk.
 DiskRangeList.MutateHelper toRead = new 
DiskRangeList.MutateHelper(listToRead.get());
-if (isDebugTracingEnabled && LOG.isInfoEnabled()) {
-  LOG.info("Resulting disk ranges to read (file " + fileKey + "): "
+if (isTracingEnabled && LOG.isInfoEnabled()) {
+  LOG.trace("Resulting disk ranges to read (file " + fileKey + "): "
   + 

[05/50] [abbrv] hive git commit: HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei Zheng, reviewed by Eugene Koifman)

2016-04-04 Thread jdere
HIVE-13151 : Clean up UGI objects in FileSystem cache for transactions (Wei 
Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f9d1b6ab
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f9d1b6ab
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f9d1b6ab

Branch: refs/heads/llap
Commit: f9d1b6ab77ab15b8337c17fbe38557c1f7b5ce58
Parents: d3a5f20
Author: Wei Zheng 
Authored: Thu Mar 24 17:29:59 2016 -0700
Committer: Wei Zheng 
Committed: Thu Mar 24 17:29:59 2016 -0700

--
 .../hive/hcatalog/streaming/HiveEndPoint.java   | 11 +
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   |  5 +++
 .../hive/ql/txn/compactor/CompactorThread.java  |  5 +++
 .../hadoop/hive/ql/txn/compactor/Initiator.java |  9 +++-
 .../hadoop/hive/ql/txn/compactor/Worker.java|  8 +++-
 .../apache/hadoop/hive/ql/TestTxnCommands2.java | 47 
 6 files changed, 82 insertions(+), 3 deletions(-)
--
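
Each hunk below adds a FileSystem.closeAllForUGI(ugi) call after a UserGroupInformation.doAs block, so FileSystem objects cached for the impersonated user are released once the work is done. A minimal sketch of that cleanup pattern, assuming standard Hadoop APIs (class and method names here are illustrative, not the patched Hive classes):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class UgiCleanupSketch {
  private static final Logger LOG = LoggerFactory.getLogger(UgiCleanupSketch.class);

  void runAs(UserGroupInformation ugi) throws IOException, InterruptedException {
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // work that opens FileSystem instances cached under this UGI
        return null;
      }
    });
    try {
      // Evict the FileSystem objects cached for this UGI so they do not accumulate.
      FileSystem.closeAllForUGI(ugi);
    } catch (IOException e) {
      LOG.error("Could not clean up file-system handles for UGI: " + ugi, e);
    }
  }
}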


http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
--
diff --git 
a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
 
b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index 4c77842..baeafad 100644
--- 
a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ 
b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -18,6 +18,7 @@
 
 package org.apache.hive.hcatalog.streaming;
 
+import org.apache.hadoop.fs.FileSystem;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.cli.CliSessionState;
@@ -342,6 +343,11 @@ public class HiveEndPoint {
 return null;
   }
 } );
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+}
   } catch (IOException e) {
 LOG.error("Error closing connection to " + endPt, e);
   } catch (InterruptedException e) {
@@ -937,6 +943,11 @@ public class HiveEndPoint {
   }
 }
 );
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+}
   } catch (IOException e) {
 throw new ImpersonationFailed("Failed closing Txn Batch as user '" + 
username +
 "' on  endPoint :" + endPt, e);

http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
index 9ffeaec..4c31a49 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
@@ -272,6 +272,11 @@ public class Cleaner extends CompactorThread {
 return null;
   }
 });
+try {
+  FileSystem.closeAllForUGI(ugi);
+} catch (IOException exception) {
+  LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception + " for " +
+  ci.getFullPartitionName());}
   }
   txnHandler.markCleaned(ci);
 } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/f9d1b6ab/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java 
b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
index 8495c66..4d6e24e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
@@ -174,6 +174,11 @@ abstract class CompactorThread extends Thread implements 
MetaStoreThread {
   return null;
 }
   });
+  try {
+FileSystem.closeAllForUGI(ugi);
+  } catch (IOException exception) {
+LOG.error("Could not clean up file-system handles for UGI: " + ugi, 
exception);
+  }
 
   if (wrapper.size() == 1) {
 LOG.debug("Running job as " + wrapper.get(0));


[17/50] [abbrv] hive git commit: HIVE-13324. LLAP: history log for FRAGMENT_START doesn't log DagId correctly. (Siddharth Seth, Reviewed by Sergey Shelukhin)

2016-04-04 Thread jdere
HIVE-13324. LLAP: history log for FRAGMENT_START doesn't log DagId correctly. 
(Siddharth Seth, Reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3038b05e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3038b05e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3038b05e

Branch: refs/heads/llap
Commit: 3038b05ed346f4b5438e9072edb19186ea90d042
Parents: 2449d1d
Author: Siddharth Seth 
Authored: Sat Mar 26 14:12:36 2016 -0700
Committer: Siddharth Seth 
Committed: Sat Mar 26 14:12:36 2016 -0700

--
 .../apache/hadoop/hive/llap/tez/Converters.java |   1 +
 .../hadoop/hive/llap/tez/TestConverters.java| 190 +++
 2 files changed, 191 insertions(+)
--
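
The one-line Converters change below copies the DAG identifier into the FragmentSpecProto builder, which is what the FRAGMENT_START history events were missing. A simplified, self-contained sketch of the bug and the fix, using made-up stand-in classes instead of the real Tez and protobuf types:

public class DagIdConverterSketch {
  // Simplified stand-ins for Tez's TaskSpec and the FragmentSpecProto builder.
  static class TaskSpec {
    final String dagName; final int dagId; final String vertexName;
    TaskSpec(String dagName, int dagId, String vertexName) {
      this.dagName = dagName; this.dagId = dagId; this.vertexName = vertexName;
    }
  }

  static class FragmentSpec {
    String dagName; int dagId; String vertexName;
  }

  static FragmentSpec convert(TaskSpec spec) {
    FragmentSpec out = new FragmentSpec();
    out.dagName = spec.dagName;
    out.dagId = spec.dagId;        // the assignment HIVE-13324 adds; it was missing before
    out.vertexName = spec.vertexName;
    return out;
  }

  public static void main(String[] args) {
    FragmentSpec f = convert(new TaskSpec("dag_1", 7, "Map 1"));
    System.out.println(f.dagId);   // 7 rather than the default 0
  }
}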


http://git-wip-us.apache.org/repos/asf/hive/blob/3038b05e/llap-common/src/java/org/apache/hadoop/hive/llap/tez/Converters.java
--
diff --git 
a/llap-common/src/java/org/apache/hadoop/hive/llap/tez/Converters.java 
b/llap-common/src/java/org/apache/hadoop/hive/llap/tez/Converters.java
index a5c3631..ec6e439 100644
--- a/llap-common/src/java/org/apache/hadoop/hive/llap/tez/Converters.java
+++ b/llap-common/src/java/org/apache/hadoop/hive/llap/tez/Converters.java
@@ -85,6 +85,7 @@ public class Converters {
 FragmentSpecProto.Builder builder = FragmentSpecProto.newBuilder();
 
builder.setFragmentIdentifierString(taskSpec.getTaskAttemptID().toString());
 builder.setDagName(taskSpec.getDAGName());
+builder.setDagId(taskSpec.getDagIdentifier());
 builder.setVertexName(taskSpec.getVertexName());
 builder.setVertexParallelism(taskSpec.getVertexParallelism());
 builder.setFragmentNumber(taskSpec.getTaskAttemptID().getTaskID().getId());

http://git-wip-us.apache.org/repos/asf/hive/blob/3038b05e/llap-common/src/test/org/apache/hadoop/hive/llap/tez/TestConverters.java
--
diff --git 
a/llap-common/src/test/org/apache/hadoop/hive/llap/tez/TestConverters.java 
b/llap-common/src/test/org/apache/hadoop/hive/llap/tez/TestConverters.java
new file mode 100644
index 000..d4cdac1
--- /dev/null
+++ b/llap-common/src/test/org/apache/hadoop/hive/llap/tez/TestConverters.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.tez;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
+import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto;
+import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentSpecProto;
+import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto;
+import 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.tez.dag.api.InputDescriptor;
+import org.apache.tez.dag.api.OutputDescriptor;
+import org.apache.tez.dag.api.ProcessorDescriptor;
+import org.apache.tez.dag.api.UserPayload;
+import org.apache.tez.dag.records.TezDAGID;
+import org.apache.tez.dag.records.TezTaskAttemptID;
+import org.apache.tez.dag.records.TezTaskID;
+import org.apache.tez.dag.records.TezVertexID;
+import org.apache.tez.runtime.api.impl.InputSpec;
+import org.apache.tez.runtime.api.impl.OutputSpec;
+import org.apache.tez.runtime.api.impl.TaskSpec;
+import org.junit.Test;
+
+public class TestConverters {
+
+  @Test(timeout = 5000)
+  public void testTaskSpecToFragmentSpec() {
+ByteBuffer procBb = ByteBuffer.allocate(4);
+procBb.putInt(0, 200);
+UserPayload processorPayload = UserPayload.create(procBb);
+ProcessorDescriptor processorDescriptor =
+
ProcessorDescriptor.create("fakeProcessorName").setUserPayload(processorPayload);
+
+ByteBuffer input1Bb = ByteBuffer.allocate(4);
+input1Bb.putInt(0, 300);
+UserPayload input1Payload = UserPayload.create(input1Bb);
+InputDescriptor id1 = 
InputDescriptor.create("input1ClassName").setUserPayload(input1Payload);
+

[49/50] [abbrv] hive git commit: HIVE-10249 ACID: show locks should show who the lock is waiting for (Eugene Koifman, reviewed by Wei Zheng)

2016-04-04 Thread jdere
HIVE-10249 ACID: show locks should show who the lock is waiting for (Eugene 
Koifman, reviewed by Wei Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4e9f95a1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4e9f95a1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4e9f95a1

Branch: refs/heads/llap
Commit: 4e9f95a1bad89ac4ea0cefc65eeba7a1e56a948d
Parents: 51efcb8
Author: Eugene Koifman 
Authored: Wed Mar 30 12:17:06 2016 -0700
Committer: Eugene Koifman 
Committed: Wed Mar 30 12:17:06 2016 -0700

--
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|  6 ++-
 .../hadoop/hive/metastore/txn/TxnHandler.java   | 46 
 .../hive/metastore/txn/TestTxnHandler.java  |  2 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java | 16 ++-
 .../hive/ql/lockmgr/TestDbTxnManager2.java  | 28 
 .../clientpositive/dbtxnmgr_showlocks.q.out |  6 +--
 6 files changed, 89 insertions(+), 15 deletions(-)
--
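
The patch adds HL_BLOCKEDBY_EXT_ID and HL_BLOCKEDBY_INT_ID columns to HIVE_LOCKS and surfaces them in SHOW LOCKS; because both columns are nullable bigints, the reader has to pair ResultSet.getLong() with wasNull(). A minimal JDBC sketch of that pattern (connection handling and error paths omitted; not the actual TxnHandler code):

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;

public class NullableLockColumnSketch {
  void printBlockedLocks(Connection dbConn) throws Exception {
    try (Statement stmt = dbConn.createStatement();
         ResultSet rs = stmt.executeQuery(
             "select hl_lock_ext_id, hl_blockedby_ext_id from HIVE_LOCKS")) {
      while (rs.next()) {
        long lockId = rs.getLong(1);
        long blockedBy = rs.getLong(2);
        // getLong() returns 0 for SQL NULL, so wasNull() decides whether the
        // lock is really waiting on another lock.
        if (!rs.wasNull()) {
          System.out.println("lock " + lockId + " is blocked by " + blockedBy);
        }
      }
    }
  }
}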


http://git-wip-us.apache.org/repos/asf/hive/blob/4e9f95a1/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index df480ea..c82d23a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -103,7 +103,11 @@ public final class TxnDbUtil {
   " HL_ACQUIRED_AT bigint," +
   " HL_USER varchar(128) NOT NULL," +
   " HL_HOST varchar(128) NOT NULL," +
-  " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
+  " HL_HEARTBEAT_COUNT integer," +
+  " HL_AGENT_INFO varchar(128)," +
+  " HL_BLOCKEDBY_EXT_ID bigint," +
+  " HL_BLOCKEDBY_INT_ID bigint," +
+" PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
   stmt.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
 
   stmt.execute("CREATE TABLE NEXT_LOCK_ID (" + " NL_NEXT bigint NOT 
NULL)");

http://git-wip-us.apache.org/repos/asf/hive/blob/4e9f95a1/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 21faff4..be3c6de 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -847,8 +847,8 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
*/
   private static class LockInfoExt extends LockInfo {
 private final ShowLocksResponseElement e;
-LockInfoExt(ShowLocksResponseElement e, long intLockId) {
-  super(e, intLockId);
+LockInfoExt(ShowLocksResponseElement e) {
+  super(e);
   this.e = e;
 }
   }
@@ -864,7 +864,8 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
 stmt = dbConn.createStatement();
 
 String s = "select hl_lock_ext_id, hl_txnid, hl_db, hl_table, 
hl_partition, hl_lock_state, " +
-  "hl_lock_type, hl_last_heartbeat, hl_acquired_at, hl_user, hl_host, 
hl_lock_int_id from HIVE_LOCKS";
+  "hl_lock_type, hl_last_heartbeat, hl_acquired_at, hl_user, hl_host, 
hl_lock_int_id," +
+  "hl_blockedby_ext_id, hl_blockedby_int_id from HIVE_LOCKS";
 LOG.debug("Doing to execute query <" + s + ">");
 ResultSet rs = stmt.executeQuery(s);
 while (rs.next()) {
@@ -892,7 +893,16 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
   if (!rs.wasNull()) e.setAcquiredat(acquiredAt);
   e.setUser(rs.getString(10));
   e.setHostname(rs.getString(11));
-  sortedList.add(new LockInfoExt(e, rs.getLong(12)));
+  e.setLockIdInternal(rs.getLong(12));
+  long id = rs.getLong(13);
+  if(!rs.wasNull()) {
+e.setBlockedByExtId(id);
+  }
+  id = rs.getLong(14);
+  if(!rs.wasNull()) {
+e.setBlockedByIntId(id);
+  }
+  sortedList.add(new LockInfoExt(e));
 }
 LOG.debug("Going to rollback");
 dbConn.rollback();
@@ -1142,6 +1152,10 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
   private static void shouldNeverHappen(long txnid) {
 throw new RuntimeException("This should never happen: " + 
JavaUtils.txnIdToString(txnid));
   }
+  private static void shouldNeverHappen(long txnid, long 

[42/50] [abbrv] hive git commit: HIVE-13326: HiveServer2: Make ZK config publishing configurable (Vaibhav Gumashta reviewed by Thejas Nair)

2016-04-04 Thread jdere
HIVE-13326: HiveServer2: Make ZK config publishing configurable (Vaibhav 
Gumashta reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/56b64598
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/56b64598
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/56b64598

Branch: refs/heads/llap
Commit: 56b645981cf466830daaed98f978df5f509bd149
Parents: a14ef8a
Author: Vaibhav Gumashta 
Authored: Tue Mar 29 12:57:47 2016 -0700
Committer: Vaibhav Gumashta 
Committed: Tue Mar 29 12:57:47 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   |  56 +++-
 .../jdbc/TestJdbcWithLocalClusterSpark.java |   2 +-
 .../apache/hive/jdbc/TestJdbcWithMiniMr.java|   2 +-
 ...stMultiSessionsHS2WithLocalClusterSpark.java |   6 +-
 .../jdbc/TestServiceDiscoveryWithMiniHS2.java   | 132 +++
 .../jdbc/authorization/TestHS2AuthzContext.java |   4 +-
 .../authorization/TestJdbcMetadataApiAuth.java  |   2 +-
 .../TestJdbcWithSQLAuthorization.java   |   2 +-
 .../hive/jdbc/ZooKeeperHiveClientHelper.java|  21 ++-
 .../apache/hive/service/server/HiveServer2.java |  39 +++---
 11 files changed, 236 insertions(+), 32 deletions(-)
--
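
The new hive.server2.zookeeper.publish.configs property (default true) lets HiveServer2 skip publishing its configuration to the ZooKeeper znode it registers under. A minimal sketch of how such a flag would be consulted; publishConfigsToZooKeeper() is a placeholder for illustration, not a real HiveServer2 method:

import org.apache.hadoop.hive.conf.HiveConf;

public class PublishConfigSketch {
  void maybePublish(HiveConf hiveConf) {
    boolean publishConfigs =
        hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS);
    if (publishConfigs) {
      publishConfigsToZooKeeper();
    }
  }

  private void publishConfigsToZooKeeper() {
    // placeholder: the real work writes the server's config into its znode
  }
}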


http://git-wip-us.apache.org/repos/asf/hive/blob/56b64598/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index f03c1ab..95c5c0e 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2017,6 +2017,8 @@ public class HiveConf extends Configuration {
 "hive.zookeeper.quorum in their connection string."),
 HIVE_SERVER2_ZOOKEEPER_NAMESPACE("hive.server2.zookeeper.namespace", 
"hiveserver2",
 "The parent node in ZooKeeper used by HiveServer2 when supporting 
dynamic service discovery."),
+
HIVE_SERVER2_ZOOKEEPER_PUBLISH_CONFIGS("hive.server2.zookeeper.publish.configs",
 true,
+"Whether we should publish HiveServer2's configs to ZooKeeper."),
 
 // HiveServer2 global init file location
 
HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION("hive.server2.global.init.file.location",
 "${env:HIVE_CONF_DIR}",

http://git-wip-us.apache.org/repos/asf/hive/blob/56b64598/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
--
diff --git 
a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java 
b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
index 751d8ea..a9d9c76 100644
--- a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
+++ b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.WindowsPathUtil;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.util.ZooKeeperHiveHelper;
 import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim;
 import org.apache.hadoop.hive.shims.HadoopShims.MiniMrShim;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -303,6 +304,13 @@ public class MiniHS2 extends AbstractHiveService {
 return getServiceClientInternal();
   }
 
+  public HiveConf getServerConf() {
+if (hiveServer2 != null) {
+  return hiveServer2.getHiveConf();
+}
+return null;
+  }
+
   public CLIServiceClient getServiceClientInternal() {
 for (Service service : hiveServer2.getServices()) {
   if (service instanceof ThriftBinaryCLIService) {
@@ -318,8 +326,9 @@ public class MiniHS2 extends AbstractHiveService {
   /**
* return connection URL for this server instance
* @return
+   * @throws Exception
*/
-  public String getJdbcURL() {
+  public String getJdbcURL() throws Exception {
 return getJdbcURL("default");
   }
 
@@ -327,8 +336,9 @@ public class MiniHS2 extends AbstractHiveService {
* return connection URL for this server instance
* @param dbName - DB name to be included in the URL
* @return
+   * @throws Exception
*/
-  public String getJdbcURL(String dbName) {
+  public String getJdbcURL(String dbName) throws Exception {
 return getJdbcURL(dbName, "");
   }
 
@@ -337,8 +347,9 @@ public class MiniHS2 extends AbstractHiveService {
* @param dbName - DB name to be included in the URL
* @param sessionConfExt - Addional string to be appended to sessionConf 
part of url
* @return
+   

[21/50] [abbrv] hive git commit: HIVE-13319 : Fix passing external handles in task display (Rajat Khandelwal, reviewed by amareshwari)

2016-04-04 Thread jdere
HIVE-13319 : Fix passing external handles in task display (Rajat Khandelwal, 
reviewed by amareshwari)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/41a30b59
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/41a30b59
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/41a30b59

Branch: refs/heads/llap
Commit: 41a30b59de35601211657b65a20b9d418958fb58
Parents: 69cfd35
Author: Rajat Khandelwal 
Authored: Mon Mar 28 09:45:43 2016 +0530
Committer: Amareshwari Sriramadasu 
Committed: Mon Mar 28 09:45:43 2016 +0530

--
 ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java  | 11 +++
 .../test/org/apache/hive/service/cli/CLIServiceTest.java |  8 
 2 files changed, 15 insertions(+), 4 deletions(-)
--
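
TaskDisplay previously snapshotted the external handle only in its constructor, before a MapReduce or Tez job id existed; the fix captures it in updateStatus() the first time it becomes non-null. A simplified stand-in class showing that lazy-capture pattern (not the actual QueryDisplay code):

public class TaskDisplaySketch {
  private String externalHandle;

  // Capture the handle the first time the underlying task reports one;
  // at construction time a MAPRED task has not launched its job yet.
  synchronized void updateStatus(String currentExternalHandle) {
    if (externalHandle == null && currentExternalHandle != null) {
      externalHandle = currentExternalHandle;
    }
  }

  synchronized String getExternalHandle() {
    return externalHandle;
  }
}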


http://git-wip-us.apache.org/repos/asf/hive/blob/41a30b59/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java 
b/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
index 467dab6..d582bc0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
@@ -72,7 +72,7 @@ public class QueryDisplay {
 private Long endTime;
 
 private String taskId;
-private String taskExternalHandle;
+private String externalHandle;
 
 public Task.TaskState taskState;
 private StageType taskType;
@@ -85,7 +85,7 @@ public class QueryDisplay {
 }
 public TaskDisplay(Task task) {
   taskId = task.getId();
-  taskExternalHandle = task.getExternalHandle();
+  externalHandle = task.getExternalHandle();
   taskType = task.getType();
   name = task.getName();
   requireLock = task.requireLock();
@@ -150,12 +150,15 @@ public class QueryDisplay {
 }
 
 public synchronized String getExternalHandle() {
-  return taskExternalHandle;
+  return externalHandle;
 }
 
 public synchronized  void updateStatus(Task 
tTask) {
   this.taskState = tTask.getTaskState();
-  switch(taskState) {
+  if (externalHandle == null && tTask.getExternalHandle() != null) {
+this.externalHandle = tTask.getExternalHandle();
+  }
+  switch (taskState) {
 case RUNNING:
   beginTime = System.currentTimeMillis();
   break;

http://git-wip-us.apache.org/repos/asf/hive/blob/41a30b59/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
--
diff --git a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java 
b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
index e145eb4..698b13d 100644
--- a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
+++ b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
@@ -45,6 +45,7 @@ import org.codehaus.jackson.type.TypeReference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
 import org.junit.After;
@@ -676,6 +677,7 @@ public abstract class CLIServiceTest {
   switch (taskDisplay.taskState) {
 case INITIALIZED:
 case QUEUED:
+  assertNull(taskDisplay.getExternalHandle());
   assertNull(taskDisplay.getBeginTime());
   assertNull(taskDisplay.getEndTime());
   assertNull(taskDisplay.getElapsedTime());
@@ -683,6 +685,9 @@ public abstract class CLIServiceTest {
   assertNull(taskDisplay.getReturnValue());
   break;
 case RUNNING:
+  if (taskDisplay.getTaskType() == StageType.MAPRED || 
taskDisplay.getTaskType() == StageType.MAPREDLOCAL) {
+assertNotNull(taskDisplay.getExternalHandle());
+  }
   assertNotNull(taskDisplay.getBeginTime());
   assertNull(taskDisplay.getEndTime());
   assertNotNull(taskDisplay.getElapsedTime());
@@ -690,6 +695,9 @@ public abstract class CLIServiceTest {
   assertNull(taskDisplay.getReturnValue());
   break;
 case FINISHED:
+  if (taskDisplay.getTaskType() == StageType.MAPRED || 
taskDisplay.getTaskType() == StageType.MAPREDLOCAL) {
+assertNotNull(taskDisplay.getExternalHandle());
+  }
   assertNotNull(taskDisplay.getBeginTime());
   assertNotNull(taskDisplay.getEndTime());
   assertNotNull(taskDisplay.getElapsedTime());



[07/50] [abbrv] hive git commit: HIVE-13362: Commit binary file required for HIVE-13361 (Prasanth Jayachandran reviewed by Gopal V)

2016-04-04 Thread jdere
HIVE-13362: Commit binary file required for HIVE-13361 (Prasanth Jayachandran 
reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dfba1fb2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dfba1fb2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dfba1fb2

Branch: refs/heads/llap
Commit: dfba1fb280f82822c1c006a0961a3ce9a52b6a6d
Parents: ab095f0
Author: Prasanth Jayachandran 
Authored: Thu Mar 24 20:09:14 2016 -0500
Committer: Prasanth Jayachandran 
Committed: Thu Mar 24 20:10:26 2016 -0500

--
 data/files/alltypesorc3xcols | Bin 0 -> 1504592 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/dfba1fb2/data/files/alltypesorc3xcols
--
diff --git a/data/files/alltypesorc3xcols b/data/files/alltypesorc3xcols
new file mode 100644
index 000..e484873
Binary files /dev/null and b/data/files/alltypesorc3xcols differ



[39/50] [abbrv] hive git commit: HIVE-11424 : Rule to transform OR clauses into IN clauses in CBO (Jesus Camacho Rodriguez via Ashutosh Chauhan)

2016-04-04 Thread jdere
HIVE-11424 : Rule to transform OR clauses into IN clauses in CBO (Jesus Camacho 
Rodriguez via Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8c8ff3f1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8c8ff3f1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8c8ff3f1

Branch: refs/heads/llap
Commit: 8c8ff3f144921e9b985abe51eb82ebad94195b4a
Parents: 09b00fc
Author: Jesus Camacho Rodriguez 
Authored: Tue Mar 22 23:41:00 2016 -0800
Committer: Ashutosh Chauhan 
Committed: Tue Mar 29 11:18:58 2016 -0700

--
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   7 +
 .../hadoop/hive/ql/optimizer/Optimizer.java |   4 +-
 .../rules/HivePointLookupOptimizerRule.java | 381 +++
 .../ql/optimizer/pcr/PcrExprProcFactory.java| 103 ++---
 .../hadoop/hive/ql/parse/CalcitePlanner.java|  40 +-
 .../clientpositive/auto_join19_inclause.q   |  18 +
 .../queries/clientpositive/filter_in_or_dup.q   |  19 +
 .../clientpositive/auto_join19_inclause.q.out   | 130 +++
 .../clientpositive/constprog_semijoin.q.out |   4 +-
 .../dynpart_sort_optimization_acid.q.out|   4 +-
 .../clientpositive/filter_in_or_dup.q.out   |  96 +
 .../results/clientpositive/perf/query13.q.out   |  14 +-
 .../results/clientpositive/perf/query27.q.out   |   2 +-
 .../results/clientpositive/perf/query34.q.out   |   2 +-
 .../results/clientpositive/perf/query48.q.out   |  14 +-
 .../results/clientpositive/perf/query68.q.out   |   2 +-
 .../results/clientpositive/perf/query73.q.out   |   2 +-
 .../results/clientpositive/perf/query79.q.out   |   2 +-
 .../results/clientpositive/perf/query82.q.out   |   2 +-
 .../results/clientpositive/perf/query85.q.out   |  26 +-
 .../results/clientpositive/pointlookup2.q.out   |  38 +-
 .../results/clientpositive/pointlookup3.q.out   |  50 ++-
 .../results/clientpositive/pointlookup4.q.out   |   2 +-
 .../spark/constprog_semijoin.q.out  |   4 +-
 .../clientpositive/tez/bucketpruning1.q.out |   8 +-
 .../clientpositive/tez/constprog_semijoin.q.out |   4 +-
 .../tez/vector_mr_diff_schema_alias.q.out   |   2 +-
 .../vector_mr_diff_schema_alias.q.out   |   2 +-
 28 files changed, 824 insertions(+), 158 deletions(-)
--
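
The new rule rewrites a chain of equality disjuncts on one column, such as c = 1 OR c = 2 OR c = 3, into c IN (1, 2, 3) once the number of values reaches the configured minimum. A toy, Calcite-free sketch of that grouping idea over plain strings (purely illustrative, not the HivePointLookupOptimizerRule implementation):

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class OrToInSketch {
  // Groups "col = literal" disjuncts by column and emits an IN clause when a
  // column has at least `min` values; otherwise the equalities are kept as-is.
  static String rewrite(List<String[]> equalities, int min) {
    Map<String, List<String>> byColumn = new LinkedHashMap<>();
    for (String[] eq : equalities) {
      byColumn.computeIfAbsent(eq[0], k -> new ArrayList<>()).add(eq[1]);
    }
    List<String> predicates = new ArrayList<>();
    for (Map.Entry<String, List<String>> e : byColumn.entrySet()) {
      if (e.getValue().size() >= min) {
        predicates.add(e.getKey() + " IN (" + String.join(", ", e.getValue()) + ")");
      } else {
        for (String v : e.getValue()) {
          predicates.add(e.getKey() + " = " + v);
        }
      }
    }
    return String.join(" OR ", predicates);
  }

  public static void main(String[] args) {
    List<String[]> eqs = new ArrayList<>();
    eqs.add(new String[]{"c", "1"});
    eqs.add(new String[]{"c", "2"});
    eqs.add(new String[]{"c", "3"});
    System.out.println(rewrite(eqs, 2));  // prints: c IN (1, 2, 3)
  }
}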


http://git-wip-us.apache.org/repos/asf/hive/blob/8c8ff3f1/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index b516925..56b96b4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -1398,6 +1398,13 @@ public final class FunctionRegistry {
   }
 
   /**
+   * Returns whether the exprNodeDesc is a node of "in".
+   */
+  public static boolean isIn(ExprNodeDesc desc) {
+return GenericUDFIn.class == getGenericUDFClassFromExprDesc(desc);
+  }
+
+  /**
* Returns whether the exprNodeDesc is a node of "not".
*/
   public static boolean isOpNot(ExprNodeDesc desc) {

http://git-wip-us.apache.org/repos/asf/hive/blob/8c8ff3f1/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
index f56cd96..55c71dd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
@@ -23,7 +23,6 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.hive.conf.HiveConf;
-import 
org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcCtx.ConstantPropagateOption;
 import 
org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverterPostProc;
 import org.apache.hadoop.hive.ql.optimizer.correlation.CorrelationOptimizer;
 import org.apache.hadoop.hive.ql.optimizer.correlation.ReduceSinkDeDuplication;
@@ -83,7 +82,8 @@ public class Optimizer {
 }
 
 // Try to transform OR predicates in Filter into simpler IN clauses first
-if (HiveConf.getBoolVar(hiveConf, 
HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER)) {
+if (HiveConf.getBoolVar(hiveConf, 
HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER) &&
+!pctx.getContext().isCboSucceeded()) {
   final int min = HiveConf.getIntVar(hiveConf,
   HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN);
   transformations.add(new PointLookupOptimizer(min));


[20/50] [abbrv] hive git commit: HIVE-13115: MetaStore Direct SQL getPartitions call fail when the columns schemas for a partition are null (Ratandeep Ratti reviewed by Carl Steinbach)

2016-04-04 Thread jdere
HIVE-13115: MetaStore Direct SQL getPartitions call fail when the columns 
schemas for a partition are null (Ratandeep Ratti reviewed by Carl Steinbach)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/69cfd357
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/69cfd357
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/69cfd357

Branch: refs/heads/llap
Commit: 69cfd357eb482c426161aa3c4a00d574ee29416e
Parents: 9686209
Author: Carl Steinbach 
Authored: Sun Mar 27 15:41:38 2016 -0700
Committer: Carl Steinbach 
Committed: Sun Mar 27 15:41:38 2016 -0700

--
 .../hive/metastore/TestHiveMetaStore.java   | 20 +++-
 .../hive/metastore/MetaStoreDirectSql.java  | 32 +++-
 2 files changed, 37 insertions(+), 15 deletions(-)
--
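
The MetaStoreDirectSql fix relaxes the null check: a partition row now only needs its SD and SerDe ids, since the column-descriptor id may legitimately be null. A minimal sketch of the relaxed validation, with an unchecked exception standing in for MetaException and the view lookup reduced to a boolean:

public class PartitionValidationSketch {
  static void validate(Long sdId, Long colId, Long serdeId, boolean isView) {
    // A partition must have at least sdId and serdeId set, or nothing set if
    // it is a view; colId alone being null is now tolerated.
    if (sdId == null || serdeId == null) {
      if ((sdId != null || colId != null || serdeId != null) || !isView) {
        throw new IllegalStateException(
            "Unexpected null for one of the IDs, SD " + sdId + ", serde " + serdeId);
      }
    }
  }
}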


http://git-wip-us.apache.org/repos/asf/hive/blob/69cfd357/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 5da4165..83fb15c 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -280,6 +280,24 @@ public abstract class TestHiveMetaStore extends TestCase {
   }
   assertTrue("Partitions are not same", part.equals(part_get));
 
+  // check null cols schemas for a partition
+  List vals6 = makeVals("2016-02-22 00:00:00", "16");
+  Partition part6 = makePartitionObject(dbName, tblName, vals6, tbl, 
"/part5");
+  part6.getSd().setCols(null);
+  LOG.info("Creating partition will null field schema");
+  client.add_partition(part6);
+  LOG.info("Listing all partitions for table " + dbName + "." + tblName);
+  final List partitions = client.listPartitions(dbName, 
tblName, (short) -1);
+  boolean foundPart = false;
+  for (Partition p : partitions) {
+if (p.getValues().equals(vals6)) {
+  assertNull(p.getSd().getCols());
+  LOG.info("Found partition " + p + " having null field schema");
+  foundPart = true;
+}
+  }
+  assertTrue(foundPart);
+
   String partName = "ds=" + FileUtils.escapePathName("2008-07-01 
14:13:12") + "/hr=14";
   String part2Name = "ds=" + FileUtils.escapePathName("2008-07-01 
14:13:12") + "/hr=15";
   String part3Name = "ds=" + FileUtils.escapePathName("2008-07-02 
14:13:12") + "/hr=15";
@@ -313,7 +331,7 @@ public abstract class TestHiveMetaStore extends TestCase {
   partialVals.clear();
   partialVals.add("");
   partialNames = client.listPartitionNames(dbName, tblName, partialVals, 
(short) -1);
-  assertTrue("Should have returned 4 partition names", partialNames.size() 
== 4);
+  assertTrue("Should have returned 5 partition names", partialNames.size() 
== 5);
   assertTrue("Not all part names returned", 
partialNames.containsAll(partNames));
 
   // Test partition listing with a partial spec - hr is specified but ds 
is not

http://git-wip-us.apache.org/repos/asf/hive/blob/69cfd357/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index d51f58d..06e9f78 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -557,14 +557,14 @@ class MetaStoreDirectSql {
   Long sdId = extractSqlLong(fields[1]);
   Long colId = extractSqlLong(fields[2]);
   Long serdeId = extractSqlLong(fields[3]);
-  // A partition must have either everything set, or nothing set if it's a 
view.
-  if (sdId == null || colId == null || serdeId == null) {
+  // A partition must have at least sdId and serdeId set, or nothing set 
if it's a view.
+  if (sdId == null || serdeId == null) {
 if (isView == null) {
   isView = isViewTable(dbName, tblName);
 }
 if ((sdId != null || colId != null || serdeId != null) || !isView) {
-  throw new MetaException("Unexpected null for one of the IDs, SD " + 
sdId + ", column "
-  + colId + ", serde " + serdeId + " for a " + (isView ? "" : 
"non-") + " view");
+  throw new MetaException("Unexpected null for one of the IDs, SD " + 
sdId +
+  ", serde 

[50/50] [abbrv] hive git commit: HIVE-13419: Merge master into llap branch

2016-04-04 Thread jdere
HIVE-13419: Merge master into llap branch


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a7b0ca73
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a7b0ca73
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a7b0ca73

Branch: refs/heads/llap
Commit: a7b0ca733e416951ab6c36f71dbe512665477535
Parents: 28d1082 4e9f95a
Author: Jason Dere 
Authored: Mon Apr 4 13:37:14 2016 -0700
Committer: Jason Dere 
Committed: Mon Apr 4 13:37:14 2016 -0700

--
 .../apache/hadoop/hive/ant/GenVectorCode.java   |  531 +-
 .../org/apache/hadoop/hive/cli/CliDriver.java   |3 +
 .../apache/hadoop/hive/common/FileUtils.java|   54 +
 .../apache/hadoop/hive/common/ServerUtils.java  |   11 +
 .../hive/common/type/HiveIntervalDayTime.java   |  245 -
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   31 +-
 .../org/apache/hive/common/util/DateUtils.java  |   19 -
 .../hive/contrib/serde2/MultiDelimitSerDe.java  |   23 +-
 data/files/alltypesorc3xcols|  Bin 0 -> 1504592 bytes
 data/files/timestamps.txt   |   50 +
 .../deployers/config/hive/hive-site.mysql.xml   |   24 +-
 .../hive/hcatalog/streaming/HiveEndPoint.java   |   11 +
 .../hcatalog/templeton/SecureProxySupport.java  |   46 +-
 .../antlr4/org/apache/hive/hplsql/Hplsql.g4 |  266 +-
 .../main/java/org/apache/hive/hplsql/Conf.java  |2 +-
 .../main/java/org/apache/hive/hplsql/Conn.java  |3 +-
 .../java/org/apache/hive/hplsql/Converter.java  |   15 +-
 .../main/java/org/apache/hive/hplsql/Exec.java  |  106 +-
 .../java/org/apache/hive/hplsql/Expression.java |   13 +
 .../main/java/org/apache/hive/hplsql/File.java  |   11 +
 .../main/java/org/apache/hive/hplsql/Ftp.java   |  415 +
 .../main/java/org/apache/hive/hplsql/Meta.java  |   35 +-
 .../java/org/apache/hive/hplsql/Package.java|3 +
 .../main/java/org/apache/hive/hplsql/Row.java   |4 +-
 .../java/org/apache/hive/hplsql/Select.java |6 +-
 .../main/java/org/apache/hive/hplsql/Stmt.java  |  167 +-
 .../main/java/org/apache/hive/hplsql/Utils.java |   40 +
 .../main/java/org/apache/hive/hplsql/Var.java   |   18 +-
 .../apache/hive/hplsql/functions/Function.java  |   40 +-
 .../hive/hplsql/functions/FunctionDatetime.java |   40 +
 .../hive/hplsql/functions/FunctionString.java   |   26 +-
 .../org/apache/hive/hplsql/TestHplsqlLocal.java |   28 +-
 .../apache/hive/hplsql/TestHplsqlOffline.java   |   25 +
 .../test/queries/db/create_drop_database.sql|5 +
 .../queries/db/create_procedure_no_params.sql   |   25 +
 hplsql/src/test/queries/db/describe.sql |3 +
 hplsql/src/test/queries/db/execute.sql  |7 +
 hplsql/src/test/queries/db/expression.sql   |1 +
 hplsql/src/test/queries/db/for.sql  |1 +
 hplsql/src/test/queries/db/insert.sql   |3 +
 hplsql/src/test/queries/db/insert_directory.sql |   12 +
 hplsql/src/test/queries/db/schema.sql   |   10 +-
 hplsql/src/test/queries/db/truncate_table.sql   |2 +
 .../src/test/queries/local/create_function3.sql |   58 +
 .../src/test/queries/local/create_function4.sql |   19 +
 .../test/queries/local/create_procedure3.sql|   29 +
 hplsql/src/test/queries/local/declare3.sql  |7 +
 hplsql/src/test/queries/local/if.sql|6 +-
 hplsql/src/test/queries/local/interval.sql  |4 +-
 hplsql/src/test/queries/local/replace.sql   |1 +
 .../queries/offline/create_table_mssql2.sql |   33 +
 .../test/queries/offline/create_table_mysql.sql |5 +
 .../test/queries/offline/create_table_ora2.sql  |6 +
 .../test/queries/offline/create_table_pg.sql|5 +
 hplsql/src/test/queries/offline/update.sql  |   33 +
 .../results/db/create_drop_database.out.txt |8 +
 .../results/db/create_procedure_mssql.out.txt   |2 +-
 .../db/create_procedure_no_params.out.txt   |   10 +
 .../db/create_procedure_return_cursor.out.txt   |4 +-
 .../db/create_procedure_return_cursor2.out.txt  |4 +-
 hplsql/src/test/results/db/describe.out.txt |   12 +
 hplsql/src/test/results/db/execute.out.txt  |   14 +
 hplsql/src/test/results/db/expression.out.txt   |5 +
 hplsql/src/test/results/db/for.out.txt  |   44 +
 hplsql/src/test/results/db/insert.out.txt   |4 +
 .../test/results/db/insert_directory.out.txt|9 +
 .../test/results/db/rowtype_attribute.out.txt   |2 +-
 .../src/test/results/db/truncate_table.out.txt  |4 +
 .../test/results/local/create_function3.out.txt |   22 +
 .../test/results/local/create_function4.out.txt |9 +
 .../test/results/local/create_package.out.txt   |2 +-
 .../results/local/create_procedure3.out.txt |   31 +
 .../local/create_procedure_no_params.out.txt|   12 +-
 hplsql/src/test/results/local/declare3.out.txt  |9 +
 

hive git commit: HIVE-13365. Change the MiniLLAPCluster to work with a MiniZKCluster, and potentially allow multiple instances of LLAP within the MiniLlapCluster. (Siddharth Seth, reviewed by Sergey S

2016-04-04 Thread sseth
Repository: hive
Updated Branches:
  refs/heads/master b44650231 -> 91ab819a1


HIVE-13365. Change the MiniLLAPCluster to work with a MiniZKCluster, and 
potentially allow multiple instances of LLAP within the MiniLlapCluster. 
(Siddharth Seth, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/91ab819a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/91ab819a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/91ab819a

Branch: refs/heads/master
Commit: 91ab819a18d6271a6c8905d085ad90b1b184ecae
Parents: b446502
Author: Siddharth Seth 
Authored: Mon Apr 4 15:23:37 2016 -0700
Committer: Siddharth Seth 
Committed: Mon Apr 4 15:23:37 2016 -0700

--
 itests/hive-unit/pom.xml|   2 -
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   |   2 +-
 .../apache/hadoop/hive/llap/LlapItUtils.java|  10 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java|  10 +-
 .../hive/llap/daemon/MiniLlapCluster.java   | 145 ---
 5 files changed, 109 insertions(+), 60 deletions(-)
--
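
The LlapItUtils entry point now takes an optional MiniZooKeeperCluster so several LLAP instances can share one ZooKeeper; callers that do not manage ZooKeeper themselves pass null, as MiniHS2 does in this patch. A minimal sketch of the updated call site, assuming the three-argument signature shown in the diff below:

import java.io.IOException;

import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.llap.LlapItUtils;
import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster;

public class MiniLlapStartSketch {
  MiniLlapCluster start(HiveConf hiveConf, MiniZooKeeperCluster zk, String confDir)
      throws IOException {
    // Passing null for zk keeps the old behaviour; a shared MiniZooKeeperCluster
    // lets several LLAP daemons register against the same ensemble.
    return LlapItUtils.startAndGetMiniLlapCluster(hiveConf, zk, confDir);
  }
}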


http://git-wip-us.apache.org/repos/asf/hive/blob/91ab819a/itests/hive-unit/pom.xml
--
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index 7219f1d..ae231de 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -210,14 +210,12 @@
   org.apache.hbase
   hbase-server
   ${hbase.version}
-  test
 
 
   org.apache.hbase
   hbase-server
   ${hbase.version}
   test-jar
-  test
 
 
   org.apache.hbase

http://git-wip-us.apache.org/repos/asf/hive/blob/91ab819a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
--
diff --git 
a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java 
b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
index 6141a1a..6b337d2 100644
--- a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
+++ b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
@@ -202,7 +202,7 @@ public class MiniHS2 extends AbstractHiveService {
 if (usePortsFromConf) {
   hiveConf.setBoolean("minillap.usePortsFromConf", true);
 }
-llapCluster = LlapItUtils.startAndGetMiniLlapCluster(hiveConf, null);
+llapCluster = LlapItUtils.startAndGetMiniLlapCluster(hiveConf, null, 
null);
 
 mr = ShimLoader.getHadoopShims().getMiniTezCluster(hiveConf, 4, 
uriString);
 break;

http://git-wip-us.apache.org/repos/asf/hive/blob/91ab819a/itests/util/src/main/java/org/apache/hadoop/hive/llap/LlapItUtils.java
--
diff --git 
a/itests/util/src/main/java/org/apache/hadoop/hive/llap/LlapItUtils.java 
b/itests/util/src/main/java/org/apache/hadoop/hive/llap/LlapItUtils.java
index cb4aba5..c1a32c9 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/llap/LlapItUtils.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/llap/LlapItUtils.java
@@ -25,6 +25,7 @@ import java.util.Iterator;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration;
 import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster;
@@ -36,7 +37,9 @@ public class LlapItUtils {
 
   private static final Logger LOG = LoggerFactory.getLogger(LlapItUtils.class);
 
-  public static MiniLlapCluster startAndGetMiniLlapCluster(Configuration conf, 
String confDir) throws
+  public static MiniLlapCluster startAndGetMiniLlapCluster(Configuration conf,
+   
MiniZooKeeperCluster miniZkCluster,
+   String confDir) 
throws
   IOException {
 MiniLlapCluster llapCluster;
 LOG.info("Using conf dir: {}", confDir);
@@ -57,11 +60,14 @@ public class LlapItUtils {
 // enabling this will cause test failures in Mac OS X
 final boolean directMemoryEnabled = false;
 final int numLocalDirs = 1;
-LOG.info("MiniLlap Configs - maxMemory: " + maxMemory + " memoryForCache: 
" + memoryForCache
+LOG.info("MiniLlap Configs -  maxMemory: " + maxMemory +
+" memoryForCache: " + memoryForCache
 + " totalExecutorMemory: " + totalExecutorMemory + " numExecutors: " + 
numExecutors
 + " asyncIOEnabled: " + asyncIOEnabled + " directMemoryEnabled: " + 
directMemoryEnabled
 + " numLocalDirs: " + numLocalDirs);
 

[23/24] hive git commit: HIVE-13365. Change the MiniLLAPCluster to work with a MiniZKCluster, and potentially allow multiple instances of LLAP within the MiniLlapCluster. (Siddharth Seth, reviewed by

2016-04-04 Thread sseth
HIVE-13365. Change the MiniLLAPCluster to work with a MiniZKCluster, and 
potentially allow multiple instances of LLAP within the MiniLlapCluster. 
(Siddharth Seth, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/91ab819a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/91ab819a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/91ab819a

Branch: refs/heads/llap
Commit: 91ab819a18d6271a6c8905d085ad90b1b184ecae
Parents: b446502
Author: Siddharth Seth 
Authored: Mon Apr 4 15:23:37 2016 -0700
Committer: Siddharth Seth 
Committed: Mon Apr 4 15:23:37 2016 -0700

--
 itests/hive-unit/pom.xml|   2 -
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   |   2 +-
 .../apache/hadoop/hive/llap/LlapItUtils.java|  10 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java|  10 +-
 .../hive/llap/daemon/MiniLlapCluster.java   | 145 ---
 5 files changed, 109 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/91ab819a/itests/hive-unit/pom.xml
--
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index 7219f1d..ae231de 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -210,14 +210,12 @@
   org.apache.hbase
   hbase-server
   ${hbase.version}
-  test
 
 
   org.apache.hbase
   hbase-server
   ${hbase.version}
   test-jar
-  test
 
 
   org.apache.hbase

http://git-wip-us.apache.org/repos/asf/hive/blob/91ab819a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
--
diff --git 
a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java 
b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
index 6141a1a..6b337d2 100644
--- a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
+++ b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
@@ -202,7 +202,7 @@ public class MiniHS2 extends AbstractHiveService {
 if (usePortsFromConf) {
   hiveConf.setBoolean("minillap.usePortsFromConf", true);
 }
-llapCluster = LlapItUtils.startAndGetMiniLlapCluster(hiveConf, null);
+llapCluster = LlapItUtils.startAndGetMiniLlapCluster(hiveConf, null, 
null);
 
 mr = ShimLoader.getHadoopShims().getMiniTezCluster(hiveConf, 4, 
uriString);
 break;

http://git-wip-us.apache.org/repos/asf/hive/blob/91ab819a/itests/util/src/main/java/org/apache/hadoop/hive/llap/LlapItUtils.java
--
diff --git 
a/itests/util/src/main/java/org/apache/hadoop/hive/llap/LlapItUtils.java 
b/itests/util/src/main/java/org/apache/hadoop/hive/llap/LlapItUtils.java
index cb4aba5..c1a32c9 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/llap/LlapItUtils.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/llap/LlapItUtils.java
@@ -25,6 +25,7 @@ import java.util.Iterator;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration;
 import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster;
@@ -36,7 +37,9 @@ public class LlapItUtils {
 
   private static final Logger LOG = LoggerFactory.getLogger(LlapItUtils.class);
 
-  public static MiniLlapCluster startAndGetMiniLlapCluster(Configuration conf, 
String confDir) throws
+  public static MiniLlapCluster startAndGetMiniLlapCluster(Configuration conf,
+   
MiniZooKeeperCluster miniZkCluster,
+   String confDir) 
throws
   IOException {
 MiniLlapCluster llapCluster;
 LOG.info("Using conf dir: {}", confDir);
@@ -57,11 +60,14 @@ public class LlapItUtils {
 // enabling this will cause test failures in Mac OS X
 final boolean directMemoryEnabled = false;
 final int numLocalDirs = 1;
-LOG.info("MiniLlap Configs - maxMemory: " + maxMemory + " memoryForCache: 
" + memoryForCache
+LOG.info("MiniLlap Configs -  maxMemory: " + maxMemory +
+" memoryForCache: " + memoryForCache
 + " totalExecutorMemory: " + totalExecutorMemory + " numExecutors: " + 
numExecutors
 + " asyncIOEnabled: " + asyncIOEnabled + " directMemoryEnabled: " + 
directMemoryEnabled
 + " numLocalDirs: " + numLocalDirs);
 llapCluster = MiniLlapCluster.create(clusterName,
+miniZkCluster,
+

[18/24] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
--
diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
new file mode 100644
index 000..97b1219
--- /dev/null
+++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java
@@ -0,0 +1,601 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.service;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class HiveServerException extends TException implements 
org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("HiveServerException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = 
new org.apache.thrift.protocol.TField("message", 
org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC 
= new org.apache.thrift.protocol.TField("errorCode", 
org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField SQLSTATE_FIELD_DESC = 
new org.apache.thrift.protocol.TField("SQLState", 
org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map schemes = 
new HashMap();
+  static {
+schemes.put(StandardScheme.class, new 
HiveServerExceptionStandardSchemeFactory());
+schemes.put(TupleScheme.class, new 
HiveServerExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+  private int errorCode; // required
+  private String SQLState; // required
+
+  /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+MESSAGE((short)1, "message"),
+ERROR_CODE((short)2, "errorCode"),
+SQLSTATE((short)3, "SQLState");
+
+private static final Map byName = new HashMap();
+
+static {
+  for (_Fields field : EnumSet.allOf(_Fields.class)) {
+byName.put(field.getFieldName(), field);
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, or null if its not 
found.
+ */
+public static _Fields findByThriftId(int fieldId) {
+  switch(fieldId) {
+case 1: // MESSAGE
+  return MESSAGE;
+case 2: // ERROR_CODE
+  return ERROR_CODE;
+case 3: // SQLSTATE
+  return SQLSTATE;
+default:
+  return null;
+  }
+}
+
+/**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+public static _Fields findByThriftIdOrThrow(int fieldId) {
+  _Fields fields = findByThriftId(fieldId);
+  if (fields == null) throw new IllegalArgumentException("Field " + 
fieldId + " doesn't exist!");
+  return fields;
+}
+
+/**
+ * Find the _Fields constant that matches name, or null if its not found.
+ */
+public static _Fields findByName(String name) {
+  return byName.get(name);
+}
+
+private final short _thriftId;
+private final String _fieldName;
+
+_Fields(short thriftId, String fieldName) {
+  _thriftId = thriftId;
+  _fieldName = fieldName;
+}
+
+public short getThriftFieldId() {
+  return _thriftId;
+}
+
+public String getFieldName() {
+  return _fieldName;
+}
+  }
+
+  // isset id assignments
+  private 

[09/24] hive git commit: HIVE-12612: beeline always exits with 0 status when reading query from standard input (Reuben Kuhnert, reviewed by Sergio Pena)

2016-04-04 Thread sseth
HIVE-12612: beeline always exits with 0 status when reading query from standard 
input (Reuben Kuhnert, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ac273b67
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ac273b67
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ac273b67

Branch: refs/heads/llap
Commit: ac273b672de402027181b71fb192930645bd5cc0
Parents: 03b81bc
Author: Sergio Pena 
Authored: Fri Apr 1 10:38:27 2016 -0500
Committer: Sergio Pena 
Committed: Fri Apr 1 10:38:27 2016 -0500

--
 beeline/pom.xml.orig| 169 +++
 .../java/org/apache/hive/beeline/BeeLine.java   |  18 +-
 .../apache/hive/beeline/cli/TestHiveCli.java|  15 +-
 3 files changed, 189 insertions(+), 13 deletions(-)
--
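
The BeeLine change keeps track of the status of the last dispatched line so that a failing query read from standard input no longer exits with status 0. A simplified, self-contained sketch of that loop; the interfaces and error codes here are stand-ins for BeeLine's ConsoleReader, dispatch() and ERRNO_* constants:

public class ExitStatusLoopSketch {
  static final int ERRNO_OK = 0;
  static final int ERRNO_OTHER = 2;

  interface LineSource { String readLine(); }
  interface Dispatcher { boolean dispatch(String line); }

  static int execute(LineSource reader, Dispatcher dispatcher, boolean exitOnError) {
    int lastExecutionResult = ERRNO_OK;
    String line;
    while ((line = reader.readLine()) != null) {
      // Remember whether the last line succeeded instead of always returning OK.
      boolean ok = dispatcher.dispatch(line.trim());
      lastExecutionResult = ok ? ERRNO_OK : ERRNO_OTHER;
      if (!ok && exitOnError) {
        break;
      }
    }
    return lastExecutionResult;
  }
}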


http://git-wip-us.apache.org/repos/asf/hive/blob/ac273b67/beeline/pom.xml.orig
--
diff --git a/beeline/pom.xml.orig b/beeline/pom.xml.orig
new file mode 100644
index 000..8ac83f5
--- /dev/null
+++ b/beeline/pom.xml.orig
@@ -0,0 +1,169 @@
+
+
+http://maven.apache.org/POM/4.0.0;
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  4.0.0
+  
+org.apache.hive
+hive
+2.1.0-SNAPSHOT
+../pom.xml
+  
+
+  hive-beeline
+  jar
+  Hive Beeline
+
+  
+..
+  
+
+  
+
+
+
+  org.apache.hive
+  hive-common
+  ${project.version}
+
+
+  org.apache.hive
+  hive-metastore
+  ${project.version}
+
+
+  org.apache.hive
+  hive-shims
+  ${project.version}
+
+
+  org.apache.hive
+  hive-jdbc
+  ${project.version}
+
+
+
+  commons-cli
+  commons-cli
+  ${commons-cli.version}
+
+
+  commons-lang
+  commons-lang
+  ${commons-lang.version}
+
+
+  commons-io
+  commons-io
+  ${commons-io.version}
+
+
+  jline
+  jline
+  ${jline.version}
+
+
+  org.apache.hadoop
+  hadoop-common
+  ${hadoop.version}
+  true
+
+
+  org.apache.thrift
+  libthrift
+  ${libthrift.version}
+
+
+  net.sf.supercsv
+  super-csv
+  ${super-csv.version}
+
+
+
+  org.apache.hive
+  hive-exec
+  ${project.version}
+  tests
+  test
+
+
+  org.apache.hive
+  hive-service
+  ${project.version}
+  test
+
+
+
+  org.apache.hadoop
+  hadoop-mapreduce-client-core
+  ${hadoop.version}
+  test
+
+
+  junit
+  junit
+  ${junit.version}
+  test
+
+
+  postgresql
+  postgresql
+  9.1-901.jdbc4
+  test
+
+  
+
+  
+
+  sources
+  
+
+  
+org.apache.maven.plugins
+maven-source-plugin
+
+  
+attach-sources
+
+  test-jar
+
+  
+
+  
+
+  
+
+  
+
+  
+${basedir}/src/java
+${basedir}/src/test
+
+  
+org.apache.maven.plugins
+maven-jar-plugin
+
+  
+
+  test-jar
+
+  
+
+  
+
+  
+
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ac273b67/beeline/src/java/org/apache/hive/beeline/BeeLine.java
--
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java 
b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 4ab6aa8..a4a9558 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -953,26 +953,32 @@ public class BeeLine implements Closeable {
   }
 
   private int execute(ConsoleReader reader, boolean exitOnError) {
-String line;
+int lastExecutionResult = ERRNO_OK;
 while (!exit) {
   try {
 // Execute one instruction; terminate on executing a script if there 
is an error
 // in silent mode, prevent the query and prompt being echoed back to 
terminal
-line = (getOpts().isSilent() && getOpts().getScriptFile() != null) ? 
reader
+String line = (getOpts().isSilent() && getOpts().getScriptFile() != 
null) ? reader
 .readLine(null, ConsoleReader.NULL_MASK) : 
reader.readLine(getPrompt());
 
 // trim line
-line = (line == null) ? null : line.trim();
+if (line != null) {
+  line = line.trim();
+}
 
-if (!dispatch(line) && exitOnError) {
- 

[15/24] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote
--
diff --git a/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote 
b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote
new file mode 100755
index 000..9a2322f
--- /dev/null
+++ b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote
@@ -0,0 +1,1242 @@
+#!/usr/bin/env python
+#
+# Autogenerated by Thrift Compiler (0.9.3)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+import sys
+import pprint
+from urlparse import urlparse
+from thrift.transport import TTransport
+from thrift.transport import TSocket
+from thrift.transport import TSSLSocket
+from thrift.transport import THttpClient
+from thrift.protocol import TBinaryProtocol
+
+from hive_service import ThriftHive
+from hive_service.ttypes import *
+
+if len(sys.argv) <= 1 or sys.argv[1] == '--help':
+  print('')
+  print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] 
[-s[sl]] function [arg1 [arg2...]]')
+  print('')
+  print('Functions:')
+  print('  void execute(string query)')
+  print('  string fetchOne()')
+  print('   fetchN(i32 numRows)')
+  print('   fetchAll()')
+  print('  Schema getSchema()')
+  print('  Schema getThriftSchema()')
+  print('  HiveClusterStatus getClusterStatus()')
+  print('  QueryPlan getQueryPlan()')
+  print('  void clean()')
+  print('  string getMetaConf(string key)')
+  print('  void setMetaConf(string key, string value)')
+  print('  void create_database(Database database)')
+  print('  Database get_database(string name)')
+  print('  void drop_database(string name, bool deleteData, bool cascade)')
+  print('   get_databases(string pattern)')
+  print('   get_all_databases()')
+  print('  void alter_database(string dbname, Database db)')
+  print('  Type get_type(string name)')
+  print('  bool create_type(Type type)')
+  print('  bool drop_type(string type)')
+  print('   get_type_all(string name)')
+  print('   get_fields(string db_name, string table_name)')
+  print('   get_fields_with_environment_context(string db_name, string 
table_name, EnvironmentContext environment_context)')
+  print('   get_schema(string db_name, string table_name)')
+  print('   get_schema_with_environment_context(string db_name, string 
table_name, EnvironmentContext environment_context)')
+  print('  void create_table(Table tbl)')
+  print('  void create_table_with_environment_context(Table tbl, 
EnvironmentContext environment_context)')
+  print('  void drop_table(string dbname, string name, bool deleteData)')
+  print('  void drop_table_with_environment_context(string dbname, string 
name, bool deleteData, EnvironmentContext environment_context)')
+  print('   get_tables(string db_name, string pattern)')
+  print('   get_table_meta(string db_patterns, string tbl_patterns,  
tbl_types)')
+  print('   get_all_tables(string db_name)')
+  print('  Table get_table(string dbname, string tbl_name)')
+  print('   get_table_objects_by_name(string dbname,  tbl_names)')
+  print('   get_table_names_by_filter(string dbname, string filter, i16 
max_tables)')
+  print('  void alter_table(string dbname, string tbl_name, Table new_tbl)')
+  print('  void alter_table_with_environment_context(string dbname, string 
tbl_name, Table new_tbl, EnvironmentContext environment_context)')
+  print('  void alter_table_with_cascade(string dbname, string tbl_name, Table 
new_tbl, bool cascade)')
+  print('  Partition add_partition(Partition new_part)')
+  print('  Partition add_partition_with_environment_context(Partition 
new_part, EnvironmentContext environment_context)')
+  print('  i32 add_partitions( new_parts)')
+  print('  i32 add_partitions_pspec( new_parts)')
+  print('  Partition append_partition(string db_name, string tbl_name,  
part_vals)')
+  print('  AddPartitionsResult add_partitions_req(AddPartitionsRequest 
request)')
+  print('  Partition append_partition_with_environment_context(string db_name, 
string tbl_name,  part_vals, EnvironmentContext environment_context)')
+  print('  Partition append_partition_by_name(string db_name, string tbl_name, 
string part_name)')
+  print('  Partition append_partition_by_name_with_environment_context(string 
db_name, string tbl_name, string part_name, EnvironmentContext 
environment_context)')
+  print('  bool drop_partition(string db_name, string tbl_name,  part_vals, 
bool deleteData)')
+  print('  bool drop_partition_with_environment_context(string db_name, string 
tbl_name,  part_vals, bool deleteData, EnvironmentContext environment_context)')
+  print('  bool drop_partition_by_name(string db_name, string tbl_name, string 
part_name, bool deleteData)')
+  print('  bool drop_partition_by_name_with_environment_context(string 
db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext 

[22/24] hive git commit: HIVE-13381 : Timestamp & date should have precedence in the type hierarchy over string group (Ashutosh Chauhan via Jason Dere)

2016-04-04 Thread sseth
HIVE-13381 : Timestamp & date should have precedence in the type hierarchy over 
string group (Ashutosh Chauhan via Jason Dere)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b4465023
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b4465023
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b4465023

Branch: refs/heads/llap
Commit: b44650231ad2708fa73346164ae9c329ad36d6cb
Parents: 9830363
Author: Ashutosh Chauhan 
Authored: Tue Mar 29 19:01:24 2016 -0700
Committer: Ashutosh Chauhan 
Committed: Mon Apr 4 13:11:07 2016 -0700

--
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   9 +-
 .../ql/exec/vector/VectorizationContext.java|  12 +-
 .../hive/ql/exec/TestFunctionRegistry.java  |  18 ++-
 .../exec/vector/TestVectorizationContext.java   |  17 +-
 .../queries/clientpositive/cast_on_constant.q   |   7 +
 .../clientpositive/cast_on_constant.q.out   | 160 +++
 6 files changed, 198 insertions(+), 25 deletions(-)
--
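To make the effect of the change concrete: when a string-group type meets a
date-group type, the date/timestamp side now wins, so the string constant is
cast rather than the date/timestamp column. A toy sketch of that rule (the
enum and method below are illustrative, not Hive's actual FunctionRegistry
API):

    enum Grouping { STRING_GROUP, DATE_GROUP, NUMERIC_GROUP }

    class CommonTypeSketch {
      // Returns the type name chosen as the common comparison type.
      static String commonComparisonType(String a, Grouping ga, String b, Grouping gb) {
        if (ga == Grouping.STRING_GROUP && gb == Grouping.DATE_GROUP) {
          return b;  // date/timestamp takes precedence over string
        }
        if (gb == Grouping.STRING_GROUP && ga == Grouping.DATE_GROUP) {
          return a;
        }
        return "string";  // fall back to string for other mixed cases
      }

      public static void main(String[] args) {
        // A string constant compared with a timestamp column now resolves to
        // timestamp, so the constant is cast, not the column.
        System.out.println(commonComparisonType("string", Grouping.STRING_GROUP,
                                                "timestamp", Grouping.DATE_GROUP));
      }
    }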


http://git-wip-us.apache.org/repos/asf/hive/blob/b4465023/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 56b96b4..1343b39 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -737,7 +737,14 @@ public final class FunctionRegistry {
   return getTypeInfoForPrimitiveCategory(
   (PrimitiveTypeInfo)a, (PrimitiveTypeInfo)b,PrimitiveCategory.STRING);
 }
-
+// timestamp/date is higher precedence than String_GROUP
+if (pgA == PrimitiveGrouping.STRING_GROUP && pgB == 
PrimitiveGrouping.DATE_GROUP) {
+  return b;
+}
+// date/timestamp is higher precedence than String_GROUP
+if (pgB == PrimitiveGrouping.STRING_GROUP && pgA == 
PrimitiveGrouping.DATE_GROUP) {
+  return a;
+}
 // Another special case, because timestamp is not implicitly convertible 
to numeric types.
 if ((pgA == PrimitiveGrouping.NUMERIC_GROUP || pgB == 
PrimitiveGrouping.NUMERIC_GROUP)
 && (pcA == PrimitiveCategory.TIMESTAMP || pcB == 
PrimitiveCategory.TIMESTAMP)) {

http://git-wip-us.apache.org/repos/asf/hive/blob/b4465023/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 1eb960d..30a0f5a 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -155,7 +155,7 @@ public class VectorizationContext {
 
   VectorExpressionDescriptor vMap;
 
-  private List<String> initialColumnNames;
+  private final List<String> initialColumnNames;
 
   private List<Integer> projectedColumns;
   private List<String> projectionColumnNames;
@@ -712,7 +712,7 @@ public class VectorizationContext {
 genericUdf = new GenericUDFToDate();
 break;
   case TIMESTAMP:
-genericUdf = new GenericUDFToUnixTimeStamp();
+genericUdf = new GenericUDFTimestamp();
 break;
   case INTERVAL_YEAR_MONTH:
 genericUdf = new GenericUDFToIntervalYearMonth();
@@ -1329,7 +1329,7 @@ public class VectorizationContext {
 case INT:
 case LONG:
   return InConstantType.INT_FAMILY;
-  
+
 case DATE:
   return InConstantType.TIMESTAMP;
 
@@ -1339,16 +1339,16 @@ public class VectorizationContext {
 case FLOAT:
 case DOUBLE:
   return InConstantType.FLOAT_FAMILY;
-  
+
 case STRING:
 case CHAR:
 case VARCHAR:
 case BINARY:
   return InConstantType.STRING_FAMILY;
-  
+
 case DECIMAL:
   return InConstantType.DECIMAL;
-  
+
 
 case INTERVAL_YEAR_MONTH:
 case INTERVAL_DAY_TIME:

http://git-wip-us.apache.org/repos/asf/hive/blob/b4465023/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java 
b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
index 6a83c32..8488c21 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
@@ -253,9 +253,13 @@ public class TestFunctionRegistry extends TestCase {

[08/24] hive git commit: HIVE-12650: Improve error messages for Hive on Spark in case the cluster has no resources available (Rui reviewed by Xuefu)

2016-04-04 Thread sseth
HIVE-12650: Improve error messages for Hive on Spark in case the cluster has no 
resources available (Rui reviewed by Xuefu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/03b81bc9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/03b81bc9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/03b81bc9

Branch: refs/heads/llap
Commit: 03b81bc9c40b6de4f238f6b7660488e711b869c4
Parents: e085b7e
Author: Rui Li 
Authored: Fri Apr 1 14:36:18 2016 +0800
Committer: Rui Li 
Committed: Fri Apr 1 14:38:14 2016 +0800

--
 .../ql/exec/spark/RemoteHiveSparkClient.java| 20 +++-
 .../hadoop/hive/ql/exec/spark/SparkTask.java|  3 +++
 .../exec/spark/status/LocalSparkJobMonitor.java |  2 +-
 .../spark/status/RemoteSparkJobMonitor.java |  5 -
 .../hive/spark/client/SparkClientImpl.java  |  9 -
 5 files changed, 31 insertions(+), 8 deletions(-)
--
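The key pattern in the RemoteHiveSparkClient change below is polling the
executor count with a time-bounded Future.get(), so a cluster with no free
resources produces a warning instead of an unhandled failure. A
self-contained sketch of that pattern, assuming a stand-in task in place of
the remote Spark client call:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class BoundedPollSketch {
      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        long maxPrewarmMs = 5000;       // overall budget for pre-warming
        int minExecutors = 2;
        long start = System.currentTimeMillis();
        int curExecutors = 0;
        do {
          Future<Integer> count = pool.submit(() -> 1);  // stand-in for getExecutorCount()
          try {
            curExecutors = count.get(maxPrewarmMs, TimeUnit.MILLISECONDS);
          } catch (TimeoutException e) {
            // Do not fail outright on a slow reply; the outer loop has its own budget.
            System.err.println("Timed out getting executor count: " + e);
          }
          if (curExecutors >= minExecutors) {
            break;
          }
          Thread.sleep(500);             // wait half a second between polls
        } while (System.currentTimeMillis() - start < maxPrewarmMs);
        System.out.println("Executors after prewarm: " + curExecutors);
        pool.shutdown();
      }
    }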


http://git-wip-us.apache.org/repos/asf/hive/blob/03b81bc9/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
index 30e53d2..3a1577f 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/RemoteHiveSparkClient.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
@@ -110,7 +111,12 @@ public class RemoteHiveSparkClient implements 
HiveSparkClient {
   int curExecutors = 0;
   long ts = System.currentTimeMillis();
   do {
-curExecutors = getExecutorCount();
+try {
+  curExecutors = getExecutorCount(MAX_PREWARM_TIME, 
TimeUnit.MILLISECONDS);
+} catch (TimeoutException e) {
+  // let's not fail on a future timeout since we have a timeout for pre-warm
+  LOG.warn("Timed out getting executor count.", e);
+}
 if (curExecutors >= minExecutors) {
   LOG.info("Finished prewarming Spark executors. The current number of 
executors is " + curExecutors);
   return;
@@ -118,8 +124,8 @@ public class RemoteHiveSparkClient implements 
HiveSparkClient {
 Thread.sleep(500); // sleep half a second
   } while (System.currentTimeMillis() - ts < MAX_PREWARM_TIME);
 
-  LOG.info("Timeout (" + MAX_PREWARM_TIME + 
-  "s) occurred while prewarming executors. The current number of 
executors is " + curExecutors);
+  LOG.info("Timeout (" + MAX_PREWARM_TIME / 1000 + "s) occurred while 
prewarming executors. " +
+  "The current number of executors is " + curExecutors);
 }
   }
 
@@ -143,6 +149,11 @@ public class RemoteHiveSparkClient implements 
HiveSparkClient {
 return minExecutors;
   }
 
+  private int getExecutorCount(long timeout, TimeUnit unit) throws Exception {
+Future<Integer> handler = remoteClient.getExecutorCount();
+return handler.get(timeout, unit);
+  }
+
   @Override
   public SparkConf getSparkConf() {
 return sparkConf;
@@ -150,8 +161,7 @@ public class RemoteHiveSparkClient implements 
HiveSparkClient {
 
   @Override
   public int getExecutorCount() throws Exception {
-Future<Integer> handler = remoteClient.getExecutorCount();
-return handler.get(sparkClientTimtout, TimeUnit.SECONDS).intValue();
+return getExecutorCount(sparkClientTimtout, TimeUnit.SECONDS);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/03b81bc9/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
index 26cce1b..7f87adf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkTask.java
@@ -105,6 +105,9 @@ public class SparkTask extends Task {
 }
 LOG.info("Execution completed successfully");
   } else if (rc == 2) { // Cancel job if the monitor found job submission 
timeout.
+// TODO: If the timeout is because of lack of resources in the 
cluster, we should
+// ideally also cancel the app request here. But w/o facilities from 
Spark or YARN,
+// it's difficult to do it on hive side alone. See HIVE-12650.
 jobRef.cancelJob();
   }
   sparkJobStatus.cleanup();


[03/24] hive git commit: HIVE-13255: FloatTreeReader.nextVector is expensive (Prasanth Jayachandran reviewed by Gopal V)

2016-04-04 Thread sseth
HIVE-13255: FloatTreeReader.nextVector is expensive (Prasanth Jayachandran 
reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8225cb6a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8225cb6a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8225cb6a

Branch: refs/heads/llap
Commit: 8225cb6aedba7e49515da44f092405994f9a22b6
Parents: 4008845
Author: Prasanth Jayachandran 
Authored: Thu Mar 31 02:48:01 2016 -0700
Committer: Prasanth Jayachandran 
Committed: Thu Mar 31 02:48:01 2016 -0700

--
 .../java/org/apache/orc/impl/IntegerReader.java |  4 +-
 .../apache/orc/impl/RunLengthIntegerReader.java |  7 +--
 .../orc/impl/RunLengthIntegerReaderV2.java  |  7 +--
 .../org/apache/orc/impl/SerializationUtils.java | 34 ++-
 .../apache/orc/impl/TestSerializationUtils.java | 45 +--
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java | 16 +++
 .../hive/ql/io/orc/TreeReaderFactory.java   | 46 ++--
 7 files changed, 99 insertions(+), 60 deletions(-)
--
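Part of the SerializationUtils change below is reading IEEE-754 values by
assembling the raw bits and converting them with Float.intBitsToFloat,
instead of going through a heavier stream wrapper for every value. A hedged
sketch of that technique; the byte order and error handling of the real ORC
code may differ:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class FloatBitsSketch {
      // Read a little-endian 4-byte float directly from the stream.
      static float readFloat(InputStream in) throws IOException {
        int b0 = in.read(), b1 = in.read(), b2 = in.read(), b3 = in.read();
        if ((b0 | b1 | b2 | b3) < 0) {
          throw new java.io.EOFException("Premature end of stream");
        }
        int bits = b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
        return Float.intBitsToFloat(bits);
      }

      public static void main(String[] args) throws IOException {
        // 1.0f is 0x3f800000; serialize it little-endian and read it back.
        byte[] data = {0x00, 0x00, (byte) 0x80, 0x3f};
        System.out.println(readFloat(new ByteArrayInputStream(data)));  // prints 1.0
      }
    }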


http://git-wip-us.apache.org/repos/asf/hive/blob/8225cb6a/orc/src/java/org/apache/orc/impl/IntegerReader.java
--
diff --git a/orc/src/java/org/apache/orc/impl/IntegerReader.java 
b/orc/src/java/org/apache/orc/impl/IntegerReader.java
index b928559..7dfd289 100644
--- a/orc/src/java/org/apache/orc/impl/IntegerReader.java
+++ b/orc/src/java/org/apache/orc/impl/IntegerReader.java
@@ -60,8 +60,6 @@ public interface IntegerReader {
* @return
* @throws IOException
*/
-   void nextVector(LongColumnVector previous, long previousLen)
+   void nextVector(LongColumnVector previous, final int previousLen)
   throws IOException;
-
-  void setInStream(InStream data);
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/8225cb6a/orc/src/java/org/apache/orc/impl/RunLengthIntegerReader.java
--
diff --git a/orc/src/java/org/apache/orc/impl/RunLengthIntegerReader.java 
b/orc/src/java/org/apache/orc/impl/RunLengthIntegerReader.java
index f129c86..0c90cde 100644
--- a/orc/src/java/org/apache/orc/impl/RunLengthIntegerReader.java
+++ b/orc/src/java/org/apache/orc/impl/RunLengthIntegerReader.java
@@ -99,7 +99,7 @@ public class RunLengthIntegerReader implements IntegerReader {
   }
 
   @Override
-  public void nextVector(LongColumnVector previous, long previousLen) throws 
IOException {
+  public void nextVector(LongColumnVector previous, final int previousLen) 
throws IOException {
 previous.isRepeating = true;
 for (int i = 0; i < previousLen; i++) {
   if (!previous.isNull[i]) {
@@ -122,11 +122,6 @@ public class RunLengthIntegerReader implements 
IntegerReader {
   }
 
   @Override
-  public void setInStream(InStream data) {
-input = data;
-  }
-
-  @Override
   public void seek(PositionProvider index) throws IOException {
 input.seek(index);
 int consumed = (int) index.getNext();

http://git-wip-us.apache.org/repos/asf/hive/blob/8225cb6a/orc/src/java/org/apache/orc/impl/RunLengthIntegerReaderV2.java
--
diff --git a/orc/src/java/org/apache/orc/impl/RunLengthIntegerReaderV2.java 
b/orc/src/java/org/apache/orc/impl/RunLengthIntegerReaderV2.java
index 5f2a673..c6d685a 100644
--- a/orc/src/java/org/apache/orc/impl/RunLengthIntegerReaderV2.java
+++ b/orc/src/java/org/apache/orc/impl/RunLengthIntegerReaderV2.java
@@ -360,7 +360,7 @@ public class RunLengthIntegerReaderV2 implements 
IntegerReader {
   }
 
   @Override
-  public void nextVector(LongColumnVector previous, long previousLen) throws 
IOException {
+  public void nextVector(LongColumnVector previous, final int previousLen) 
throws IOException {
 previous.isRepeating = true;
 for (int i = 0; i < previousLen; i++) {
   if (!previous.isNull[i]) {
@@ -382,9 +382,4 @@ public class RunLengthIntegerReaderV2 implements 
IntegerReader {
   }
 }
   }
-
-  @Override
-  public void setInStream(InStream data) {
-input = data;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/8225cb6a/orc/src/java/org/apache/orc/impl/SerializationUtils.java
--
diff --git a/orc/src/java/org/apache/orc/impl/SerializationUtils.java 
b/orc/src/java/org/apache/orc/impl/SerializationUtils.java
index c1162e4..2e5a59b 100644
--- a/orc/src/java/org/apache/orc/impl/SerializationUtils.java
+++ b/orc/src/java/org/apache/orc/impl/SerializationUtils.java
@@ -18,8 +18,6 @@
 
 package org.apache.orc.impl;
 
-import org.apache.orc.impl.InStream;
-
 import java.io.EOFException;
 import java.io.IOException;
 import 

[05/24] hive git commit: HIVE-11766. LLAP: Remove MiniLlapCluster from shim layer after hadoop-1 removal. (Siddharth Seth, reviewed by Prasanth Jayachandran)

2016-04-04 Thread sseth
HIVE-11766. LLAP: Remove MiniLlapCluster from shim layer after hadoop-1
removal. (Siddharth Seth, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/184e0e1d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/184e0e1d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/184e0e1d

Branch: refs/heads/llap
Commit: 184e0e1da55c576cd8766c52423bcbcd4a2be69b
Parents: f41cbea
Author: Siddharth Seth 
Authored: Thu Mar 31 14:53:59 2016 -0700
Committer: Siddharth Seth 
Committed: Thu Mar 31 14:53:59 2016 -0700

--
 data/conf/llap/hive-site.xml| 44 ++
 data/conf/llap/llap-daemon-site.xml | 61 --
 itests/hive-unit/pom.xml| 11 ++-
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   | 12 ++-
 itests/util/pom.xml | 11 +++
 .../apache/hadoop/hive/llap/LlapItUtils.java| 84 
 .../org/apache/hadoop/hive/ql/QTestUtil.java| 17 ++--
 .../configuration/LlapDaemonConfiguration.java  |  8 ++
 .../apache/hadoop/hive/shims/Hadoop23Shims.java | 66 +--
 .../apache/hadoop/hive/shims/HadoopShims.java   |  2 +-
 10 files changed, 175 insertions(+), 141 deletions(-)
--
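With llap-daemon-site.xml removed and its settings folded into
hive-site.xml, the LLAP daemon keys listed in the diff below are read from
the single configuration resource. A hedged sketch of reading them with the
plain Hadoop Configuration API; the resource name and default values here
are illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class LlapConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.addResource("hive-site.xml");  // the merged file now carries the LLAP keys
        String hosts = conf.get("hive.llap.daemon.service.hosts", "localhost");
        int executors = conf.getInt("hive.llap.daemon.num.executors", 4);
        int port = conf.getInt("hive.llap.daemon.service.port", 0);  // 0 = any free port
        System.out.println(hosts + " executors=" + executors + " port=" + port);
      }
    }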


http://git-wip-us.apache.org/repos/asf/hive/blob/184e0e1d/data/conf/llap/hive-site.xml
--
diff --git a/data/conf/llap/hive-site.xml b/data/conf/llap/hive-site.xml
index c2bef58..72bdcfb 100644
--- a/data/conf/llap/hive-site.xml
+++ b/data/conf/llap/hive-site.xml
@@ -273,4 +273,48 @@
   false
 
 
+
+
+
+  hive.llap.daemon.service.hosts
+  localhost
+
+
+
+  hive.llap.daemon.service.port
+  0
+
+
+
+  hive.llap.daemon.num.executors
+  4
+
+
+
+  hive.llap.daemon.task.scheduler.wait.queue.size
+  4
+
+
+
+  hive.llap.cache.allow.synthetic.fileid
+  true
+
+
+
+
+  ipc.client.low-latency
+  true
+   
+
+
+  ipc.client.tcpnodelay
+  true
+   
+
+
+  ipc.clients-per-factory
+  4
+   
+
+
 

http://git-wip-us.apache.org/repos/asf/hive/blob/184e0e1d/data/conf/llap/llap-daemon-site.xml
--
diff --git a/data/conf/llap/llap-daemon-site.xml 
b/data/conf/llap/llap-daemon-site.xml
deleted file mode 100644
index 98c0f2b..000
--- a/data/conf/llap/llap-daemon-site.xml
+++ /dev/null
@@ -1,61 +0,0 @@
-
-
-
-
-
-  hive.llap.daemon.service.hosts
-  localhost
-
-
-
-  hive.llap.daemon.service.port
-  0
-
-
-
-  hive.llap.daemon.num.executors
-  4
-
-
-
-  hive.llap.daemon.task.scheduler.wait.queue.size
-  4
-
-
-
-  hive.llap.cache.allow.synthetic.fileid
-  true
-
-
-
-
-  ipc.client.low-latency
-  true
-   
-
-
-  ipc.client.tcpnodelay
-  true
-   
-
-
-  ipc.clients-per-factory
-  4
-   
-
-

http://git-wip-us.apache.org/repos/asf/hive/blob/184e0e1d/itests/hive-unit/pom.xml
--
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index 97786d9..7219f1d 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -75,6 +75,11 @@
   hive-hcatalog-streaming
   ${project.version}
 
+
+  org.apache.hive
+  hive-it-util
+  ${project.version}
+
 
 
   org.apache.hadoop
@@ -123,12 +128,6 @@
 
 
   org.apache.hive
-  hive-it-util
-  ${project.version}
-  test
-
-
-  org.apache.hive
   hive-jdbc
   ${project.version}
   test

http://git-wip-us.apache.org/repos/asf/hive/blob/184e0e1d/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
--
diff --git 
a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java 
b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
index a9d9c76..eca2317 100644
--- a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
+++ b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.llap.LlapItUtils;
+import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.WindowsPathUtil;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -60,6 +62,7 @@ public class MiniHS2 extends AbstractHiveService {
   private static final AtomicLong hs2Counter = new AtomicLong();
   private MiniMrShim mr;
   private MiniDFSShim dfs;
+  private MiniLlapCluster llapCluster = null;
   

[04/24] hive git commit: HIVE-13379 : HIVE-12851 args do not work (slider-keytab-dir, etc.) (Sergey Shelukhin, reviewed by Siddharth Seth)

2016-04-04 Thread sseth
HIVE-13379 : HIVE-12851 args do not work (slider-keytab-dir, etc.) (Sergey 
Shelukhin, reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f41cbea8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f41cbea8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f41cbea8

Branch: refs/heads/llap
Commit: f41cbea80dc8ca8765b02c19e6aeba772323895c
Parents: 8225cb6
Author: Sergey Shelukhin 
Authored: Thu Mar 31 11:53:39 2016 -0700
Committer: Sergey Shelukhin 
Committed: Thu Mar 31 11:53:39 2016 -0700

--
 .../hive/llap/cli/LlapOptionsProcessor.java | 25 +---
 1 file changed, 22 insertions(+), 3 deletions(-)
--
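The new Slider flags are declared with Apache Commons CLI's OptionBuilder,
the same fluent style used for the existing options. A small standalone
example of defining and parsing one such long option (the option name and
value below are only illustrative):

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.GnuParser;
    import org.apache.commons.cli.OptionBuilder;
    import org.apache.commons.cli.Options;

    public class CliSketch {
      @SuppressWarnings("static-access")  // OptionBuilder uses a static fluent style
      public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.addOption(OptionBuilder.hasArg()
            .withArgName("slider-keytab-dir")
            .withLongOpt("slider-keytab-dir")
            .withDescription("Slider AM keytab directory on HDFS")
            .create());
        CommandLine cl = new GnuParser().parse(options,
            new String[] {"--slider-keytab-dir", ".slider/keytabs/llap"});
        if (cl.hasOption("slider-keytab-dir")) {
          System.out.println(cl.getOptionValue("slider-keytab-dir"));
        }
      }
    }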


http://git-wip-us.apache.org/repos/asf/hive/blob/f41cbea8/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
index b7f019c..cdc919e 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
@@ -39,9 +39,6 @@ public class LlapOptionsProcessor {
   public static final String OPTION_INSTANCES = "instances"; //forward as arg
   public static final String OPTION_NAME = "name"; // forward as arg
   public static final String OPTION_DIRECTORY = "directory"; // work-dir
-  public static final String OPTION_ARGS = "args"; // forward as arg
-  public static final String OPTION_LOGLEVEL = "loglevel"; // forward as arg
-  public static final String OPTION_CHAOS_MONKEY = "chaosmonkey"; // forward 
as arg
   public static final String OPTION_EXECUTORS = "executors"; // 
llap-daemon-site
   public static final String OPTION_CACHE = "cache"; // llap-daemon-site
   public static final String OPTION_SIZE = "size"; // forward via config.json
@@ -54,6 +51,16 @@ public class LlapOptionsProcessor {
   public static final String OPTION_LLAP_QUEUE = "queue"; // forward via 
config.json
   public static final String OPTION_IO_THREADS = "iothreads"; // 
llap-daemon-site
 
+  // Options for the python script that are here because our option parser 
cannot ignore the unknown ones
+  public static final String OPTION_ARGS = "args"; // forward as arg
+  public static final String OPTION_LOGLEVEL = "loglevel"; // forward as arg
+  public static final String OPTION_CHAOS_MONKEY = "chaosmonkey"; // forward 
as arg
+  public static final String OPTION_SLIDER_KEYTAB_DIR = "slider-keytab-dir";
+  public static final String OPTION_SLIDER_KEYTAB = "slider-keytab";
+  public static final String OPTION_SLIDER_PRINCIPAL = "slider-principal";
+  public static final String OPTION_SLIDER_DEFAULT_KEYTAB = 
"slider-default-keytab";
+
+
   public class LlapOptions {
 private final int instances;
 private final String directory;
@@ -171,6 +178,18 @@ public class LlapOptionsProcessor {
 
options.addOption(OptionBuilder.hasArg().withArgName(OPTION_CHAOS_MONKEY).withLongOpt(OPTION_CHAOS_MONKEY)
 .withDescription("chaosmonkey interval").create('m'));
 
+
options.addOption(OptionBuilder.hasArg(false).withArgName(OPTION_SLIDER_DEFAULT_KEYTAB).withLongOpt(OPTION_SLIDER_DEFAULT_KEYTAB)
+.withDescription("try to set default settings for Slider AM keytab; 
mostly for dev testing").create());
+
+
options.addOption(OptionBuilder.hasArg().withArgName(OPTION_SLIDER_KEYTAB_DIR).withLongOpt(OPTION_SLIDER_KEYTAB_DIR)
+.withDescription("Slider AM keytab directory on HDFS (where the 
headless user keytab is stored by Slider keytab installation, e.g. 
.slider/keytabs/llap)").create());
+
+
options.addOption(OptionBuilder.hasArg().withArgName(OPTION_SLIDER_KEYTAB).withLongOpt(OPTION_SLIDER_KEYTAB)
+.withDescription("Slider AM keytab file name inside " + 
OPTION_SLIDER_KEYTAB_DIR).create());
+
+
options.addOption(OptionBuilder.hasArg().withArgName(OPTION_SLIDER_PRINCIPAL).withLongOpt(OPTION_SLIDER_PRINCIPAL)
+.withDescription("Slider AM principal; should be the user running the 
cluster, e.g. h...@example.com").create());
+
 
options.addOption(OptionBuilder.hasArg().withArgName(OPTION_EXECUTORS).withLongOpt(OPTION_EXECUTORS)
 .withDescription("executor per instance").create('e'));
 



[06/24] hive git commit: HIVE-13364. Allow llap to work with dynamic ports for rpc, shuffle, ui. (Siddharth Seth, reviewed by Prasanth Jayachandran)

2016-04-04 Thread sseth
HIVE-13364. Allow llap to work with dynamic ports for rpc, shuffle, ui. 
(Siddharth Seth, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/547c5cfc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/547c5cfc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/547c5cfc

Branch: refs/heads/llap
Commit: 547c5cfce9587de31a58622589a63eba62a4b120
Parents: 184e0e1
Author: Siddharth Seth 
Authored: Thu Mar 31 14:54:53 2016 -0700
Committer: Siddharth Seth 
Committed: Thu Mar 31 14:54:53 2016 -0700

--
 .../impl/LlapZookeeperRegistryImpl.java |  9 +++---
 .../hive/llap/daemon/impl/LlapDaemon.java   | 34 
 .../daemon/impl/LlapProtocolServerImpl.java |  7 ++--
 .../daemon/services/impl/LlapWebServices.java   | 13 ++--
 .../hive/llap/daemon/MiniLlapCluster.java   |  4 ++-
 5 files changed, 50 insertions(+), 17 deletions(-)
--
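A configured port of 0, as used by the updated test configs, asks the OS for
any free port; the daemon must then read back the port it was actually given
and advertise it (the extended registry log message in the diff below records
exactly these ports). A minimal sketch of the pattern with a plain
ServerSocket:

    import java.io.IOException;
    import java.net.ServerSocket;

    public class DynamicPortSketch {
      public static void main(String[] args) throws IOException {
        // Port 0 = "bind to any free port"; useful when several daemons share a host.
        try (ServerSocket server = new ServerSocket(0)) {
          int actualPort = server.getLocalPort();
          // A real daemon would now register actualPort (rpc/shuffle/web) in a
          // registry such as ZooKeeper so clients can discover it.
          System.out.println("Bound to dynamically assigned port " + actualPort);
        }
      }
    }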


http://git-wip-us.apache.org/repos/asf/hive/blob/547c5cfc/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
--
diff --git 
a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
 
b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
index c611d1a..ba38fb8 100644
--- 
a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
+++ 
b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
@@ -31,7 +31,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
-import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
@@ -68,8 +67,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.ZooDefs;
 import org.apache.zookeeper.client.ZooKeeperSaslClient;
 import org.apache.zookeeper.data.ACL;
@@ -285,8 +282,10 @@ public class LlapZookeeperRegistryImpl implements 
ServiceRegistry {
 // No node exists, throw exception
 throw new Exception("Unable to create znode for this LLAP instance on 
ZooKeeper.");
   }
-  LOG.info("Created a znode on ZooKeeper for LLAP instance: {} znodePath: 
{}", rpcEndpoint,
-  znodePath);
+  LOG.info(
+  "Registered node. Created a znode on ZooKeeper for LLAP instance: 
rpc: {}, shuffle: {}," +
+  " webui: {}, mgmt: {}, znodePath: {} ",
+  rpcEndpoint, getShuffleEndpoint(), getServicesEndpoint(), 
getMngEndpoint(), znodePath);
 } catch (Exception e) {
   LOG.error("Unable to create a znode for this server instance", e);
   CloseableUtils.closeQuietly(znode);

http://git-wip-us.apache.org/repos/asf/hive/blob/547c5cfc/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java 
b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index c8734a5..2fe59a2 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -100,7 +100,7 @@ public class LlapDaemon extends CompositeService implements 
ContainerRunner, Lla
 
   public LlapDaemon(Configuration daemonConf, int numExecutors, long 
executorMemoryBytes,
   boolean ioEnabled, boolean isDirectCache, long ioMemoryBytes, String[] 
localDirs, int srvPort,
-  int mngPort, int shufflePort) {
+  int mngPort, int shufflePort, int webPort) {
 super("LlapDaemon");
 
 initializeLogging();
@@ -140,6 +140,7 @@ public class LlapDaemon extends CompositeService implements 
ContainerRunner, Lla
 "numExecutors=" + numExecutors +
 ", rpcListenerPort=" + srvPort +
 ", mngListenerPort=" + mngPort +
+", webPort=" + webPort +
 ", workDirs=" + Arrays.toString(localDirs) +
 ", shufflePort=" + shufflePort +
 ", executorMemory=" + executorMemoryBytes +
@@ -206,12 +207,11 @@ public class LlapDaemon extends CompositeService 
implements ContainerRunner, Lla
 amReporter, executorClassLoader);
 addIfService(containerRunner);
 
-this.registry = new LlapRegistryService(true);
-addIfService(registry);
+
 

[10/24] hive git commit: HIVE-13376 : HoS emits too many logs with application state (Szehon, via Rui Li and Xuefu)

2016-04-04 Thread sseth
HIVE-13376 : HoS emits too many logs with application state (Szehon, via Rui Li 
and Xuefu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9a0dabdf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9a0dabdf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9a0dabdf

Branch: refs/heads/llap
Commit: 9a0dabdf439e11cccf9aa02e5356ab21617e1f6e
Parents: ac273b6
Author: Szehon Ho 
Authored: Fri Apr 1 11:47:52 2016 -0700
Committer: Szehon Ho 
Committed: Fri Apr 1 11:49:09 2016 -0700

--
 .../hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java   | 9 +
 1 file changed, 9 insertions(+)
--
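The fix is a "set a default only if the user did not configure it" guard on
the Spark configuration map when running on YARN. A tiny sketch of the same
pattern; the interval value here is a placeholder, see the diff below for
the value actually used:

    import java.util.HashMap;
    import java.util.Map;

    public class DefaultIfAbsentSketch {
      public static void main(String[] args) {
        Map<String, String> sparkConf = new HashMap<>();
        String master = "yarn-cluster";
        // Only raise the reporting interval when running on YARN and the user
        // has not set spark.yarn.report.interval themselves.
        if (master.startsWith("yarn")
            && sparkConf.get("spark.yarn.report.interval") == null) {
          sparkConf.put("spark.yarn.report.interval", "60000" /* placeholder, ms */);
        }
        System.out.println(sparkConf);
      }
    }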


http://git-wip-us.apache.org/repos/asf/hive/blob/9a0dabdf/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java
index 2427321..b36c60e 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HiveSparkClientFactory.java
@@ -53,6 +53,7 @@ public class HiveSparkClientFactory {
   private static final String SPARK_DEFAULT_APP_NAME = "Hive on Spark";
   private static final String SPARK_DEFAULT_SERIALIZER = 
"org.apache.spark.serializer.KryoSerializer";
   private static final String SPARK_DEFAULT_REFERENCE_TRACKING = "false";
+  private static final String SPARK_YARN_REPORT_INTERVAL = 
"spark.yarn.report.interval";
 
   public static HiveSparkClient createHiveSparkClient(HiveConf hiveconf) 
throws Exception {
 Map sparkConf = initiateSparkConf(hiveconf);
@@ -187,6 +188,14 @@ public class HiveSparkClientFactory {
   }
 }
 
+//The application reports tend to spam the hive logs.  This is controlled 
by spark, and the default seems to be 1s.
+//If it is not specified, set it to a much higher number.  It can always 
be overridden by the user.
+String sparkYarnReportInterval = sparkConf.get(SPARK_YARN_REPORT_INTERVAL);
+if (sparkMaster.startsWith("yarn") && sparkYarnReportInterval == null) {
+  //the new version of spark also takes time-units, but old versions do 
not.
+  sparkConf.put(SPARK_YARN_REPORT_INTERVAL, "6");
+}
+
 return sparkConf;
   }
 



[07/24] hive git commit: HIVE-13402 : Temporarily disable failing spark tests (Siddharth Seth via Ashutosh Chauhan)

2016-04-04 Thread sseth
HIVE-13402 : Temporarily disable failing spark tests (Siddharth Seth via 
Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e085b7e9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e085b7e9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e085b7e9

Branch: refs/heads/llap
Commit: e085b7e9bd059d91aaf013df0db4d71dca90ec6f
Parents: 547c5cf
Author: Ashutosh Chauhan 
Authored: Thu Mar 31 15:51:01 2016 -0700
Committer: Ashutosh Chauhan 
Committed: Thu Mar 31 15:51:01 2016 -0700

--
 itests/src/test/resources/testconfiguration.properties | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/e085b7e9/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 232f84e..cf9e4c9 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -886,7 +886,6 @@ spark.query.files=add_part_multiple.q, \
   input18.q, \
   input1_limit.q, \
   input_part2.q, \
-  insert1.q, \
   insert_into1.q, \
   insert_into2.q, \
   insert_into3.q, \
@@ -946,7 +945,6 @@ spark.query.files=add_part_multiple.q, \
   join_cond_pushdown_unqual2.q, \
   join_cond_pushdown_unqual3.q, \
   join_cond_pushdown_unqual4.q, \
-  join_empty.q, \
   join_filters_overlap.q, \
   join_hive_626.q, \
   join_literals.q, \
@@ -1154,7 +1152,6 @@ spark.query.files=add_part_multiple.q, \
   timestamp_lazy.q, \
   timestamp_null.q, \
   timestamp_udf.q, \
-  transform1.q, \
   transform2.q, \
   transform_ppr1.q, \
   transform_ppr2.q, \



[02/24] hive git commit: HIVE-13372: Hive Macro overwritten when multiple macros are used in one column (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2016-04-04 Thread sseth
HIVE-13372: Hive Macro overwritten when multiple macros are used in one column 
(Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/40088453
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/40088453
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/40088453

Branch: refs/heads/llap
Commit: 40088453b57a9bc6abfa7d0a23c1a78d39390212
Parents: 255069e
Author: Pengcheng Xiong 
Authored: Wed Mar 30 21:34:01 2016 -0700
Committer: Pengcheng Xiong 
Committed: Wed Mar 30 21:34:01 2016 -0700

--
 .../hive/ql/plan/ExprNodeGenericFuncDesc.java   |  5 +-
 ql/src/test/queries/clientpositive/macro_1.q| 29 +++
 .../test/results/clientpositive/macro_1.q.out   | 84 
 3 files changed, 117 insertions(+), 1 deletion(-)
--
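The fix makes two macro expressions equal only when their macro names match,
with a null name always treated as distinct, so different macros applied to
the same column are no longer collapsed into one. A small self-contained
sketch of that comparison (toy class, not the real ExprNodeGenericFuncDesc):

    import java.util.Objects;

    public class MacroEqualitySketch {
      static final class Macro {
        final String macroName;
        Macro(String macroName) { this.macroName = macroName; }
      }

      // Two macro expressions are only "the same" when both have a non-null,
      // identical macro name; a null name is always treated as distinct.
      static boolean sameMacro(Macro a, Macro b) {
        if (a.macroName == null || b.macroName == null) {
          return false;
        }
        return Objects.equals(a.macroName, b.macroName);
      }

      public static void main(String[] args) {
        System.out.println(sameMacro(new Macro("STRING_LEN"), new Macro("STRING_LEN_PLUS_ONE"))); // false
        System.out.println(sameMacro(new Macro("STRING_LEN"), new Macro("STRING_LEN")));          // true
      }
    }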


http://git-wip-us.apache.org/repos/asf/hive/blob/40088453/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java
index b7c1445..9e0159c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java
@@ -284,7 +284,10 @@ public class ExprNodeGenericFuncDesc extends ExprNodeDesc 
implements
 }
 
 if (genericUDF instanceof GenericUDFMacro) {
-  if (funcText != null && !funcText.equals(dest.funcText)) {
+  // if getMacroName is null, we always treat it different from others.
+  if (((GenericUDFMacro) genericUDF).getMacroName() == null
+  || !(((GenericUDFMacro) genericUDF).getMacroName()
+  .equals(((GenericUDFMacro) dest.genericUDF).getMacroName()))) {
 return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/40088453/ql/src/test/queries/clientpositive/macro_1.q
--
diff --git a/ql/src/test/queries/clientpositive/macro_1.q 
b/ql/src/test/queries/clientpositive/macro_1.q
new file mode 100644
index 000..dddc8e2
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/macro_1.q
@@ -0,0 +1,29 @@
+set hive.mapred.mode=nonstrict;
+
+CREATE TEMPORARY MACRO STRING_LEN(x string) length(x);
+CREATE TEMPORARY MACRO STRING_LEN_PLUS_ONE(x string) length(x)+1;
+CREATE TEMPORARY MACRO STRING_LEN_PLUS_TWO(x string) length(x)+2;
+
+create table macro_test (x string);
+
+insert into table macro_test values ("bb"), ("a"), ("ccc");
+
+SELECT
+CONCAT(STRING_LEN(x), ":", STRING_LEN_PLUS_ONE(x), ":", 
STRING_LEN_PLUS_TWO(x)) a
+FROM macro_test;
+
+SELECT
+CONCAT(STRING_LEN(x), ":", STRING_LEN_PLUS_ONE(x), ":", 
STRING_LEN_PLUS_TWO(x)) a
+FROM macro_test
+sort by a;
+
+
+SELECT
+CONCAT(STRING_LEN(x), ":", STRING_LEN_PLUS_ONE(x), ":", 
STRING_LEN_PLUS_TWO(x)) a
+FROM macro_test
+sort by a desc;
+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/hive/blob/40088453/ql/src/test/results/clientpositive/macro_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/macro_1.q.out 
b/ql/src/test/results/clientpositive/macro_1.q.out
new file mode 100644
index 000..82b4ad9
--- /dev/null
+++ b/ql/src/test/results/clientpositive/macro_1.q.out
@@ -0,0 +1,84 @@
+PREHOOK: query: CREATE TEMPORARY MACRO STRING_LEN(x string) length(x)
+PREHOOK: type: CREATEMACRO
+PREHOOK: Output: database:default
+POSTHOOK: query: CREATE TEMPORARY MACRO STRING_LEN(x string) length(x)
+POSTHOOK: type: CREATEMACRO
+POSTHOOK: Output: database:default
+PREHOOK: query: CREATE TEMPORARY MACRO STRING_LEN_PLUS_ONE(x string) 
length(x)+1
+PREHOOK: type: CREATEMACRO
+PREHOOK: Output: database:default
+POSTHOOK: query: CREATE TEMPORARY MACRO STRING_LEN_PLUS_ONE(x string) 
length(x)+1
+POSTHOOK: type: CREATEMACRO
+POSTHOOK: Output: database:default
+PREHOOK: query: CREATE TEMPORARY MACRO STRING_LEN_PLUS_TWO(x string) 
length(x)+2
+PREHOOK: type: CREATEMACRO
+PREHOOK: Output: database:default
+POSTHOOK: query: CREATE TEMPORARY MACRO STRING_LEN_PLUS_TWO(x string) 
length(x)+2
+POSTHOOK: type: CREATEMACRO
+POSTHOOK: Output: database:default
+PREHOOK: query: create table macro_test (x string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@macro_test
+POSTHOOK: query: create table macro_test (x string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@macro_test
+PREHOOK: query: insert into table macro_test values ("bb"), ("a"), ("ccc")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: 

[11/24] hive git commit: HIVE-13402 : Temporarily disable failing spark tests (Addendum)

2016-04-04 Thread sseth
HIVE-13402 : Temporarily disable failing spark tests (Addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/21f18ada
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/21f18ada
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/21f18ada

Branch: refs/heads/llap
Commit: 21f18adac0f10008e0fe18be8e2d8b7070399066
Parents: 9a0dabd
Author: Ashutosh Chauhan 
Authored: Fri Apr 1 17:14:52 2016 -0700
Committer: Ashutosh Chauhan 
Committed: Fri Apr 1 17:14:52 2016 -0700

--
 itests/src/test/resources/testconfiguration.properties| 4 
 ql/src/test/results/clientpositive/spark/temp_table.q.out | 2 ++
 2 files changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/21f18ada/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index cf9e4c9..889884c 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -719,7 +719,6 @@ spark.query.files=add_part_multiple.q, \
   auto_join3.q, \
   auto_join30.q, \
   auto_join31.q, \
-  auto_join32.q, \
   auto_join4.q, \
   auto_join5.q, \
   auto_join6.q, \
@@ -740,7 +739,6 @@ spark.query.files=add_part_multiple.q, \
   auto_sortmerge_join_14.q, \
   auto_sortmerge_join_15.q, \
   auto_sortmerge_join_16.q, \
-  auto_sortmerge_join_2.q, \
   auto_sortmerge_join_3.q, \
   auto_sortmerge_join_4.q, \
   auto_sortmerge_join_5.q, \
@@ -926,7 +924,6 @@ spark.query.files=add_part_multiple.q, \
   join38.q, \
   join39.q, \
   join4.q, \
-  join40.q, \
   join41.q, \
   join5.q, \
   join6.q, \
@@ -1029,7 +1026,6 @@ spark.query.files=add_part_multiple.q, \
   ppd_join.q, \
   ppd_join2.q, \
   ppd_join3.q, \
-  ppd_join4.q, \
   ppd_join5.q, \
   ppd_join_filter.q, \
   ppd_multi_insert.q, \

http://git-wip-us.apache.org/repos/asf/hive/blob/21f18ada/ql/src/test/results/clientpositive/spark/temp_table.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/temp_table.q.out 
b/ql/src/test/results/clientpositive/spark/temp_table.q.out
index 1bc8b19..c2ec3b4 100644
--- a/ql/src/test/results/clientpositive/spark/temp_table.q.out
+++ b/ql/src/test/results/clientpositive/spark/temp_table.q.out
@@ -461,6 +461,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@s
+POSTHOOK: Lineage: s.key SIMPLE [(src)src.FieldSchema(name:key, type:string, 
comment:default), ]
+POSTHOOK: Lineage: s.value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
 PREHOOK: query: select count(*) from s
 PREHOOK: type: QUERY
 PREHOOK: Input: default@s



[16/24] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-php/ThriftHive.php
--
diff --git a/service/src/gen/thrift/gen-php/ThriftHive.php 
b/service/src/gen/thrift/gen-php/ThriftHive.php
new file mode 100644
index 000..23dc8fd
--- /dev/null
+++ b/service/src/gen/thrift/gen-php/ThriftHive.php
@@ -0,0 +1,1943 @@
+send_execute($query);
+$this->recv_execute();
+  }
+
+  public function send_execute($query)
+  {
+$args = new \ThriftHive_execute_args();
+$args->query = $query;
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'execute', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('execute', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_execute()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\ThriftHive_execute_result', $this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == TMessageType::EXCEPTION) {
+$x = new TApplicationException();
+$x->read($this->input_);
+$this->input_->readMessageEnd();
+throw $x;
+  }
+  $result = new \ThriftHive_execute_result();
+  $result->read($this->input_);
+  $this->input_->readMessageEnd();
+}
+if ($result->ex !== null) {
+  throw $result->ex;
+}
+return;
+  }
+
+  public function fetchOne()
+  {
+$this->send_fetchOne();
+return $this->recv_fetchOne();
+  }
+
+  public function send_fetchOne()
+  {
+$args = new \ThriftHive_fetchOne_args();
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'fetchOne', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('fetchOne', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_fetchOne()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\ThriftHive_fetchOne_result', $this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == TMessageType::EXCEPTION) {
+$x = new TApplicationException();
+$x->read($this->input_);
+$this->input_->readMessageEnd();
+throw $x;
+  }
+  $result = new \ThriftHive_fetchOne_result();
+  $result->read($this->input_);
+  $this->input_->readMessageEnd();
+}
+if ($result->success !== null) {
+  return $result->success;
+}
+if ($result->ex !== null) {
+  throw $result->ex;
+}
+throw new \Exception("fetchOne failed: unknown result");
+  }
+
+  public function fetchN($numRows)
+  {
+$this->send_fetchN($numRows);
+return $this->recv_fetchN();
+  }
+
+  public function send_fetchN($numRows)
+  {
+$args = new \ThriftHive_fetchN_args();
+$args->numRows = $numRows;
+$bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_write_binary');
+if ($bin_accel)
+{
+  thrift_protocol_write_binary($this->output_, 'fetchN', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+}
+else
+{
+  $this->output_->writeMessageBegin('fetchN', TMessageType::CALL, 
$this->seqid_);
+  $args->write($this->output_);
+  $this->output_->writeMessageEnd();
+  $this->output_->getTransport()->flush();
+}
+  }
+
+  public function recv_fetchN()
+  {
+$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary');
+if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, 
'\ThriftHive_fetchN_result', $this->input_->isStrictRead());
+else
+{
+  $rseqid = 0;
+  $fname = null;
+  $mtype = 0;
+
+  $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+  if ($mtype == 

[24/24] hive git commit: HIVE-13419 part 2. Merge remote-tracking branch 'origin/master' into llap

2016-04-04 Thread sseth
HIVE-13419 part 2. Merge remote-tracking branch 'origin/master' into llap

Conflicts:

llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/79c1c691
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/79c1c691
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/79c1c691

Branch: refs/heads/llap
Commit: 79c1c691e529107cba27eabefb94707645a39987
Parents: a7b0ca7 91ab819
Author: Siddharth Seth 
Authored: Mon Apr 4 15:37:10 2016 -0700
Committer: Siddharth Seth 
Committed: Mon Apr 4 15:37:10 2016 -0700

--
 beeline/pom.xml.orig|  169 +
 .../java/org/apache/hive/beeline/BeeLine.java   |   18 +-
 .../apache/hive/beeline/cli/TestHiveCli.java|   15 +-
 data/conf/llap/hive-site.xml|   44 +
 data/conf/llap/llap-daemon-site.xml |   61 -
 .../org/apache/hive/minikdc/MiniHiveKdc.java|   46 +-
 .../minikdc/TestJdbcNonKrbSASLWithMiniKdc.java  |  103 +
 itests/hive-unit/pom.xml|   13 +-
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   |   26 +-
 .../test/resources/testconfiguration.properties |7 -
 itests/util/pom.xml |   11 +
 .../apache/hadoop/hive/llap/LlapItUtils.java|   90 +
 .../org/apache/hadoop/hive/ql/QTestUtil.java|   25 +-
 .../impl/LlapZookeeperRegistryImpl.java |9 +-
 .../hive/llap/tez/LlapProtocolClientProxy.java  |4 +-
 .../hive/llap/cli/LlapOptionsProcessor.java |   25 +-
 .../configuration/LlapDaemonConfiguration.java  |8 +
 .../hive/llap/daemon/impl/LlapDaemon.java   |   34 +-
 .../daemon/impl/LlapProtocolServerImpl.java |7 +-
 .../daemon/services/impl/LlapWebServices.java   |   13 +-
 .../hive/llap/daemon/MiniLlapCluster.java   |  147 +-
 .../llap/tezplugins/LlapTaskCommunicator.java   |   37 +-
 .../tezplugins/LlapTaskSchedulerService.java|7 +
 .../tezplugins/helpers/SourceStateTracker.java  |2 +-
 .../tezplugins/TestLlapTaskCommunicator.java|  304 +-
 .../java/org/apache/orc/impl/IntegerReader.java |4 +-
 .../apache/orc/impl/RunLengthIntegerReader.java |7 +-
 .../orc/impl/RunLengthIntegerReaderV2.java  |7 +-
 .../org/apache/orc/impl/SerializationUtils.java |   34 +-
 .../apache/orc/impl/TestSerializationUtils.java |   45 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |9 +-
 .../ql/exec/spark/HiveSparkClientFactory.java   |9 +
 .../ql/exec/spark/RemoteHiveSparkClient.java|   20 +-
 .../hadoop/hive/ql/exec/spark/SparkTask.java|3 +
 .../exec/spark/status/LocalSparkJobMonitor.java |2 +-
 .../spark/status/RemoteSparkJobMonitor.java |5 +-
 .../ql/exec/vector/VectorizationContext.java|   12 +-
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |   16 +-
 .../hive/ql/io/orc/TreeReaderFactory.java   |   46 +-
 .../hadoop/hive/ql/parse/ParseContext.java  |7 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   29 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java  |2 +-
 .../hive/ql/plan/ExprNodeGenericFuncDesc.java   |5 +-
 .../hive/ql/exec/TestFunctionRegistry.java  |   18 +-
 .../exec/vector/TestVectorizationContext.java   |   17 +-
 .../queries/clientpositive/cast_on_constant.q   |7 +
 ql/src/test/queries/clientpositive/macro_1.q|   29 +
 .../clientpositive/cast_on_constant.q.out   |  160 +
 .../test/results/clientpositive/macro_1.q.out   |   84 +
 .../clientpositive/spark/temp_table.q.out   |2 +
 service-rpc/src/gen/thrift/gen-py/__init__.py   |0
 service/src/gen/thrift/gen-cpp/ThriftHive.cpp   | 3544 
 service/src/gen/thrift/gen-cpp/ThriftHive.h | 1224 +++
 .../gen-cpp/ThriftHive_server.skeleton.cpp  |   84 +
 .../thrift/gen-cpp/hive_service_constants.cpp   |   17 +
 .../gen/thrift/gen-cpp/hive_service_constants.h |   24 +
 .../gen/thrift/gen-cpp/hive_service_types.cpp   |  351 +
 .../src/gen/thrift/gen-cpp/hive_service_types.h |  176 +
 .../hadoop/hive/service/HiveClusterStatus.java  |  901 ++
 .../hive/service/HiveServerException.java   |  601 ++
 .../hadoop/hive/service/JobTrackerState.java|   45 +
 .../apache/hadoop/hive/service/ThriftHive.java  | 7784 ++
 service/src/gen/thrift/gen-php/ThriftHive.php   | 1943 +
 service/src/gen/thrift/gen-php/Types.php|  338 +
 service/src/gen/thrift/gen-py/__init__.py   |0
 .../gen-py/hive_service/ThriftHive-remote   | 1242 +++
 .../thrift/gen-py/hive_service/ThriftHive.py| 1674 
 .../gen/thrift/gen-py/hive_service/__init__.py  |1 +
 .../gen/thrift/gen-py/hive_service/constants.py |   11 +
 .../gen/thrift/gen-py/hive_service/ttypes.py|  260 +
 .../gen/thrift/gen-rb/hive_service_constants.rb |9 +
 

[17/24] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
--
diff --git 
a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
new file mode 100644
index 000..934a8a5
--- /dev/null
+++ 
b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
@@ -0,0 +1,7784 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.service;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class ThriftHive {
+
+  public interface Iface extends 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface {
+
+public void execute(String query) throws HiveServerException, 
org.apache.thrift.TException;
+
+public String fetchOne() throws HiveServerException, 
org.apache.thrift.TException;
+
+public List<String> fetchN(int numRows) throws HiveServerException, 
org.apache.thrift.TException;
+
+public List<String> fetchAll() throws HiveServerException, 
org.apache.thrift.TException;
+
+public org.apache.hadoop.hive.metastore.api.Schema getSchema() throws 
HiveServerException, org.apache.thrift.TException;
+
+public org.apache.hadoop.hive.metastore.api.Schema getThriftSchema() 
throws HiveServerException, org.apache.thrift.TException;
+
+public HiveClusterStatus getClusterStatus() throws HiveServerException, 
org.apache.thrift.TException;
+
+public org.apache.hadoop.hive.ql.plan.api.QueryPlan getQueryPlan() throws 
HiveServerException, org.apache.thrift.TException;
+
+public void clean() throws org.apache.thrift.TException;
+
+  }
+
+  public interface AsyncIface extends 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore .AsyncIface {
+
+public void execute(String query, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void fetchOne(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void fetchN(int numRows, 
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException;
+
+public void fetchAll(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getSchema(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getThriftSchema(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getClusterStatus(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void getQueryPlan(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+public void clean(org.apache.thrift.async.AsyncMethodCallback 
resultHandler) throws org.apache.thrift.TException;
+
+  }
+
+  public static class Client extends 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Client implements 
Iface {
+public static class Factory implements 
org.apache.thrift.TServiceClientFactory<Client> {
+  public Factory() {}
+  public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
+return new Client(prot);
+  }
+  public Client getClient(org.apache.thrift.protocol.TProtocol iprot, 
org.apache.thrift.protocol.TProtocol oprot) {
+return new Client(iprot, oprot);
+  }
+}
+
+public Client(org.apache.thrift.protocol.TProtocol prot)
+{
+  super(prot, prot);
+}
+
+public Client(org.apache.thrift.protocol.TProtocol iprot, 
org.apache.thrift.protocol.TProtocol oprot) {
+  super(iprot, oprot);
+}
+
+public void execute(String query) throws 
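The regenerated Java bindings shown above (org.apache.hadoop.hive.service.ThriftHive) are driven like any other Thrift 0.9.x client: open a TSocket, wrap it in a TBinaryProtocol, and call the Iface methods on ThriftHive.Client. A minimal Java sketch, assuming a reachable HiveServer1-style endpoint; the host, port, and query below are hypothetical:

import java.util.List;

import org.apache.hadoop.hive.service.ThriftHive;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class ThriftHiveClientExample {
  public static void main(String[] args) throws TException {
    // Hypothetical endpoint; HiveServer1 listened on port 10000 by default.
    TSocket transport = new TSocket("localhost", 10000);
    transport.open();
    try {
      ThriftHive.Client client = new ThriftHive.Client(new TBinaryProtocol(transport));
      client.execute("SELECT key, value FROM src LIMIT 10");  // hypothetical query
      List<String> rows = client.fetchAll();                  // rows come back as strings
      for (String row : rows) {
        System.out.println(row);
      }
    } finally {
      transport.close();
    }
  }
}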

[21/24] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread sseth
HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, 
reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/98303635
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/98303635
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/98303635

Branch: refs/heads/llap
Commit: 983036358633cfbb6aec30003faac8280372b2c9
Parents: 6a1f8a8
Author: Wei Zheng 
Authored: Mon Apr 4 11:18:25 2016 -0700
Committer: Wei Zheng 
Committed: Mon Apr 4 11:18:25 2016 -0700

--
 service-rpc/src/gen/thrift/gen-py/__init__.py   |0
 service/src/gen/thrift/gen-cpp/ThriftHive.cpp   | 3544 
 service/src/gen/thrift/gen-cpp/ThriftHive.h | 1224 +++
 .../gen-cpp/ThriftHive_server.skeleton.cpp  |   84 +
 .../thrift/gen-cpp/hive_service_constants.cpp   |   17 +
 .../gen/thrift/gen-cpp/hive_service_constants.h |   24 +
 .../gen/thrift/gen-cpp/hive_service_types.cpp   |  351 +
 .../src/gen/thrift/gen-cpp/hive_service_types.h |  176 +
 .../hadoop/hive/service/HiveClusterStatus.java  |  901 ++
 .../hive/service/HiveServerException.java   |  601 ++
 .../hadoop/hive/service/JobTrackerState.java|   45 +
 .../apache/hadoop/hive/service/ThriftHive.java  | 7784 ++
 service/src/gen/thrift/gen-php/ThriftHive.php   | 1943 +
 service/src/gen/thrift/gen-php/Types.php|  338 +
 service/src/gen/thrift/gen-py/__init__.py   |0
 .../gen-py/hive_service/ThriftHive-remote   | 1242 +++
 .../thrift/gen-py/hive_service/ThriftHive.py| 1674 
 .../gen/thrift/gen-py/hive_service/__init__.py  |1 +
 .../gen/thrift/gen-py/hive_service/constants.py |   11 +
 .../gen/thrift/gen-py/hive_service/ttypes.py|  260 +
 .../gen/thrift/gen-rb/hive_service_constants.rb |9 +
 .../src/gen/thrift/gen-rb/hive_service_types.rb |   68 +
 service/src/gen/thrift/gen-rb/thrift_hive.rb|  555 ++
 23 files changed, 20852 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service-rpc/src/gen/thrift/gen-py/__init__.py
--
diff --git a/service-rpc/src/gen/thrift/gen-py/__init__.py 
b/service-rpc/src/gen/thrift/gen-py/__init__.py
new file mode 100644
index 000..e69de29



[12/24] hive git commit: HIVE-10280. LLAP: Handle errors while sending source state updates to the daemons. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-04-04 Thread sseth
HIVE-10280. LLAP: Handle errors while sending source state updates to the 
daemons. (Siddharth Seth, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d94e8d08
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d94e8d08
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d94e8d08

Branch: refs/heads/llap
Commit: d94e8d08dd1d92c9eee99f60273e895a4a633b23
Parents: 21f18ad
Author: Siddharth Seth 
Authored: Sat Apr 2 15:06:34 2016 -0700
Committer: Siddharth Seth 
Committed: Sat Apr 2 15:06:34 2016 -0700

--
 .../hive/llap/tez/LlapProtocolClientProxy.java  |   4 +-
 .../llap/tezplugins/LlapTaskCommunicator.java   |  37 ++-
 .../tezplugins/LlapTaskSchedulerService.java|   7 +
 .../tezplugins/helpers/SourceStateTracker.java  |   2 +-
 .../tezplugins/TestLlapTaskCommunicator.java| 304 ++-
 5 files changed, 340 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d94e8d08/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
--
diff --git 
a/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
 
b/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
index e8d4148..f48a1cb 100644
--- 
a/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
+++ 
b/llap-client/src/java/org/apache/hadoop/hive/llap/tez/LlapProtocolClientProxy.java
@@ -139,10 +139,8 @@ public class LlapProtocolClientProxy extends 
AbstractService {
 requestManager.queueRequest(new SubmitWorkCallable(nodeId, request, 
callback));
   }
 
-  public void sendSourceStateUpdate(final SourceStateUpdatedRequestProto 
request, final String host,
-final int port,
+  public void sendSourceStateUpdate(final SourceStateUpdatedRequestProto 
request, final LlapNodeId nodeId,
 final 
ExecuteRequestCallback<SourceStateUpdatedResponseProto> callback) {
-LlapNodeId nodeId = LlapNodeId.getInstance(host, port);
 requestManager.queueRequest(
 new SendSourceStateUpdateCallable(nodeId, request, callback));
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/d94e8d08/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
--
diff --git 
a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
 
b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
index 456121b..799367b 100644
--- 
a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
+++ 
b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java
@@ -150,7 +150,7 @@ public class LlapTaskCommunicator extends 
TezTaskCommunicatorImpl {
 super.initialize();
 Configuration conf = getConf();
 int numThreads = HiveConf.getIntVar(conf, 
ConfVars.LLAP_DAEMON_COMMUNICATOR_NUM_THREADS);
-this.communicator = new LlapProtocolClientProxy(numThreads, conf, token);
+this.communicator = createLlapProtocolClientProxy(numThreads, conf);
 this.deleteDelayOnDagComplete = HiveConf.getTimeVar(
 conf, ConfVars.LLAP_FILE_CLEANUP_DELAY_SECONDS, TimeUnit.SECONDS);
 LOG.info("Running LlapTaskCommunicator with "
@@ -205,6 +205,10 @@ public class LlapTaskCommunicator extends 
TezTaskCommunicatorImpl {
 }
   }
 
+  protected LlapProtocolClientProxy createLlapProtocolClientProxy(int 
numThreads, Configuration conf) {
+return new LlapProtocolClientProxy(numThreads, conf, token);
+  }
+
   @Override
   public void registerRunningContainer(ContainerId containerId, String 
hostname, int port) {
 super.registerRunningContainer(containerId, hostname, port);
@@ -413,9 +417,9 @@ public class LlapTaskCommunicator extends 
TezTaskCommunicatorImpl {
 .sourceStateUpdated(vertexStateUpdate.getVertexName(), 
vertexStateUpdate.getVertexState());
   }
 
-  public void sendStateUpdate(final String host, final int port,
+  public void sendStateUpdate(final LlapNodeId nodeId,
   final SourceStateUpdatedRequestProto request) {
-communicator.sendSourceStateUpdate(request, host, port,
+communicator.sendSourceStateUpdate(request, nodeId,
 new 
LlapProtocolClientProxy.ExecuteRequestCallback<SourceStateUpdatedResponseProto>()
 {
   @Override
   public void setResponse(SourceStateUpdatedResponseProto response) {
@@ -423,12 +427,29 @@ public class LlapTaskCommunicator extends 
TezTaskCommunicatorImpl {
 
   @Override
   public void indicateError(Throwable t) {
-// TODO HIVE-10280.
-// 
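The API change above replaces the loose (host, port) pair with a single LlapNodeId, so the error callback added for HIVE-10280 can report exactly which daemon a source-state update failed against. The self-contained Java sketch below only models that pattern with stand-in types; it is not Hive's LlapProtocolClientProxy, and the host/port values are hypothetical.

import java.util.Objects;
import java.util.function.Consumer;

public final class NodeIdExample {

  /** Stand-in for LlapNodeId: one value object instead of separate host/port arguments. */
  static final class NodeId {
    final String host;
    final int port;
    private NodeId(String host, int port) { this.host = host; this.port = port; }
    static NodeId getInstance(String host, int port) { return new NodeId(host, port); }
    @Override public String toString() { return host + ":" + port; }
    @Override public boolean equals(Object o) {
      return o instanceof NodeId && ((NodeId) o).host.equals(host) && ((NodeId) o).port == port;
    }
    @Override public int hashCode() { return Objects.hash(host, port); }
  }

  /** Stand-in for sendSourceStateUpdate: failures are routed to the caller, tagged with the node. */
  static void sendSourceStateUpdate(String stateUpdate, NodeId nodeId, Consumer<Throwable> onError) {
    try {
      // ... queue the RPC for nodeId here ...
      throw new RuntimeException("connection refused");  // simulate a send failure
    } catch (RuntimeException e) {
      onError.accept(new RuntimeException("state update failed for node " + nodeId, e));
    }
  }

  public static void main(String[] args) {
    NodeId node = NodeId.getInstance("llap-daemon-1.example.com", 15001);  // hypothetical
    sendSourceStateUpdate("V1 -> RUNNING", node, t -> System.err.println(t.getMessage()));
  }
}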

[13/24] hive git commit: HIVE-13401:Kerberized HS2 with LDAP auth enabled fails kerberos/delegation token authentication (Chaoyu Tang, reviewed by Szehon Ho)

2016-04-04 Thread sseth
HIVE-13401:Kerberized HS2 with LDAP auth enabled fails kerberos/delegation 
token authentication (Chaoyu Tang, reviewed by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6a1f8a83
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6a1f8a83
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6a1f8a83

Branch: refs/heads/llap
Commit: 6a1f8a835e13d0e0d3500fce02da8c14de34a023
Parents: d94e8d0
Author: ctang 
Authored: Sat Apr 2 21:26:22 2016 -0400
Committer: ctang 
Committed: Sat Apr 2 21:26:22 2016 -0400

--
 .../org/apache/hive/minikdc/MiniHiveKdc.java|  46 +++--
 .../minikdc/TestJdbcNonKrbSASLWithMiniKdc.java  | 103 +++
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   |  14 ++-
 .../hive/service/auth/HiveAuthFactory.java  |  13 ++-
 .../service/cli/thrift/ThriftCLIService.java|  15 +--
 5 files changed, 162 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6a1f8a83/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java
--
diff --git 
a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java 
b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java
index 4e3a9c5..bbec37e 100644
--- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java
+++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java
@@ -50,6 +50,7 @@ public class MiniHiveKdc {
   public static String HIVE_TEST_USER_1 = "user1";
   public static String HIVE_TEST_USER_2 = "user2";
   public static String HIVE_TEST_SUPER_USER = "superuser";
+  public static String AUTHENTICATION_TYPE = "KERBEROS";
 
   private final MiniKdc miniKdc;
   private final File workDir;
@@ -170,16 +171,29 @@ public class MiniHiveKdc {
* @throws Exception
*/
   public static MiniHS2 getMiniHS2WithKerb(MiniHiveKdc miniHiveKdc, HiveConf 
hiveConf) throws Exception {
-String hivePrincipal =
-
miniHiveKdc.getFullyQualifiedServicePrincipal(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL);
-String hiveKeytab = miniHiveKdc.getKeyTabFile(
-
miniHiveKdc.getServicePrincipalForUser(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL));
-
-return new MiniHS2.Builder().withConf(hiveConf).
-withMiniKdc(hivePrincipal, hiveKeytab).build();
+return getMiniHS2WithKerb(miniHiveKdc, hiveConf, AUTHENTICATION_TYPE);
   }
 
   /**
+  * Create a MiniHS2 with the hive service principal and keytab in MiniHiveKdc
+  * @param miniHiveKdc
+  * @param hiveConf
+  * @param authType
+  * @return new MiniHS2 instance
+  * @throws Exception
+  */
+ public static MiniHS2 getMiniHS2WithKerb(MiniHiveKdc miniHiveKdc, HiveConf 
hiveConf,
+ String authType) throws Exception {
+   String hivePrincipal =
+   
miniHiveKdc.getFullyQualifiedServicePrincipal(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL);
+   String hiveKeytab = miniHiveKdc.getKeyTabFile(
+   
miniHiveKdc.getServicePrincipalForUser(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL));
+
+   return new MiniHS2.Builder().withConf(hiveConf).withMiniKdc(hivePrincipal, 
hiveKeytab).
+   withAuthenticationType(authType).build();
+ }
+
+  /**
* Create a MiniHS2 with the hive service principal and keytab in MiniHiveKdc
* @param miniHiveKdc
* @param hiveConf
@@ -187,12 +201,26 @@ public class MiniHiveKdc {
* @throws Exception
*/
   public static MiniHS2 getMiniHS2WithKerbWithRemoteHMS(MiniHiveKdc 
miniHiveKdc, HiveConf hiveConf) throws Exception {
+return getMiniHS2WithKerbWithRemoteHMS(miniHiveKdc, hiveConf, 
AUTHENTICATION_TYPE);
+  }
+
+  /**
+   * Create a MiniHS2 with the hive service principal and keytab in 
MiniHiveKdc. It uses remote HMS
+   * and can support a different Sasl authType
+   * @param miniHiveKdc
+   * @param hiveConf
+   * @param authType
+   * @return new MiniHS2 instance
+   * @throws Exception
+   */
+  public static MiniHS2 getMiniHS2WithKerbWithRemoteHMS(MiniHiveKdc 
miniHiveKdc, HiveConf hiveConf,
+  String authType) throws Exception {
 String hivePrincipal =
 
miniHiveKdc.getFullyQualifiedServicePrincipal(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL);
 String hiveKeytab = miniHiveKdc.getKeyTabFile(
 
miniHiveKdc.getServicePrincipalForUser(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL));
 
 return new MiniHS2.Builder().withConf(hiveConf).withRemoteMetastore().
-withMiniKdc(hivePrincipal, hiveKeytab).build();
+withMiniKdc(hivePrincipal, 
hiveKeytab).withAuthenticationType(authType).build();
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/6a1f8a83/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcNonKrbSASLWithMiniKdc.java
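The MiniHS2 helpers above keep their old two-argument form (defaulting to KERBEROS) and gain an overload that threads an explicit SASL auth type into the builder via withAuthenticationType(). The sketch below only models that builder pattern with stand-in classes; it is not the actual MiniHS2.Builder, and the principal/keytab values are hypothetical.

public final class AuthTypeBuilderExample {

  static final class Server {
    final String principal, keytab, authType;
    Server(String principal, String keytab, String authType) {
      this.principal = principal;
      this.keytab = keytab;
      this.authType = authType;
    }
    @Override public String toString() { return "Server(authType=" + authType + ")"; }
  }

  static final class Builder {
    private String principal;
    private String keytab;
    private String authType = "KERBEROS";  // default mirrors MiniHiveKdc.AUTHENTICATION_TYPE

    Builder withMiniKdc(String principal, String keytab) {
      this.principal = principal;
      this.keytab = keytab;
      return this;
    }

    Builder withAuthenticationType(String authType) {
      this.authType = authType;
      return this;
    }

    Server build() { return new Server(principal, keytab, authType); }
  }

  public static void main(String[] args) {
    // Existing callers that omit the auth type keep KERBEROS; tests can opt into LDAP.
    Server s = new Builder()
        .withMiniKdc("hive/_HOST@EXAMPLE.COM", "/tmp/hive.keytab")
        .withAuthenticationType("LDAP")
        .build();
    System.out.println(s);
  }
}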

[20/24] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-cpp/ThriftHive.cpp
--
diff --git a/service/src/gen/thrift/gen-cpp/ThriftHive.cpp 
b/service/src/gen/thrift/gen-cpp/ThriftHive.cpp
new file mode 100644
index 000..a5448f0
--- /dev/null
+++ b/service/src/gen/thrift/gen-cpp/ThriftHive.cpp
@@ -0,0 +1,3544 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#include "ThriftHive.h"
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+
+ThriftHive_execute_args::~ThriftHive_execute_args() throw() {
+}
+
+
+uint32_t ThriftHive_execute_args::read(::apache::thrift::protocol::TProtocol* 
iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRING) {
+  xfer += iprot->readString(this->query);
+  this->__isset.query = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHive_execute_args::write(::apache::thrift::protocol::TProtocol* 
oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHive_execute_args");
+
+  xfer += oprot->writeFieldBegin("query", 
::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->query);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHive_execute_pargs::~ThriftHive_execute_pargs() throw() {
+}
+
+
+uint32_t 
ThriftHive_execute_pargs::write(::apache::thrift::protocol::TProtocol* oprot) 
const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHive_execute_pargs");
+
+  xfer += oprot->writeFieldBegin("query", 
::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->query)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHive_execute_result::~ThriftHive_execute_result() throw() {
+}
+
+
+uint32_t 
ThriftHive_execute_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+  xfer += this->ex.read(iprot);
+  this->__isset.ex = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
+  default:
+xfer += iprot->skip(ftype);
+break;
+}
+xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t 
ThriftHive_execute_result::write(::apache::thrift::protocol::TProtocol* oprot) 
const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHive_execute_result");
+
+  if (this->__isset.ex) {
+xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 
1);
+xfer += this->ex.write(oprot);
+xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHive_execute_presult::~ThriftHive_execute_presult() throw() {
+}
+
+
+uint32_t 
ThriftHive_execute_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+xfer += iprot->readFieldBegin(fname, ftype, fid);
+if (ftype == ::apache::thrift::protocol::T_STOP) {
+  break;
+}
+switch (fid)
+{
+  case 1:
+if (ftype == 
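The generated read() methods above all follow the same loop: read a field header, decode the field ids the struct knows about, and skip everything else, which is what lets older readers tolerate newer writers. A minimal Java analogue of that loop, written against libthrift's TProtocol API rather than Hive's generated classes, might look like this:

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TField;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TProtocolUtil;
import org.apache.thrift.protocol.TType;

public final class FieldSkipExample {

  /** Reads a struct with a single known string field (id 1), skipping anything unknown. */
  public static String readQuery(TProtocol iprot) throws TException {
    String query = null;
    iprot.readStructBegin();
    while (true) {
      TField field = iprot.readFieldBegin();
      if (field.type == TType.STOP) {
        break;                                   // end of struct
      }
      if (field.id == 1 && field.type == TType.STRING) {
        query = iprot.readString();              // known field: decode it
      } else {
        TProtocolUtil.skip(iprot, field.type);   // unknown field: skip it safely
      }
      iprot.readFieldEnd();
    }
    iprot.readStructEnd();
    return query;
  }
}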

[14/24] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py
--
diff --git a/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py 
b/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py
new file mode 100644
index 000..978c2a3
--- /dev/null
+++ b/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py
@@ -0,0 +1,1674 @@
+#
+# Autogenerated by Thrift Compiler (0.9.3)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#  options string: py
+#
+
+from thrift.Thrift import TType, TMessageType, TException, 
TApplicationException
+import hive_metastore.ThriftHiveMetastore
+import logging
+from ttypes import *
+from thrift.Thrift import TProcessor
+from thrift.transport import TTransport
+from thrift.protocol import TBinaryProtocol, TProtocol
+try:
+  from thrift.protocol import fastbinary
+except:
+  fastbinary = None
+
+
+class Iface(hive_metastore.ThriftHiveMetastore.Iface):
+  def execute(self, query):
+"""
+Parameters:
+ - query
+"""
+pass
+
+  def fetchOne(self):
+pass
+
+  def fetchN(self, numRows):
+"""
+Parameters:
+ - numRows
+"""
+pass
+
+  def fetchAll(self):
+pass
+
+  def getSchema(self):
+pass
+
+  def getThriftSchema(self):
+pass
+
+  def getClusterStatus(self):
+pass
+
+  def getQueryPlan(self):
+pass
+
+  def clean(self):
+pass
+
+
+class Client(hive_metastore.ThriftHiveMetastore.Client, Iface):
+  def __init__(self, iprot, oprot=None):
+hive_metastore.ThriftHiveMetastore.Client.__init__(self, iprot, oprot)
+
+  def execute(self, query):
+"""
+Parameters:
+ - query
+"""
+self.send_execute(query)
+self.recv_execute()
+
+  def send_execute(self, query):
+self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid)
+args = execute_args()
+args.query = query
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_execute(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = execute_result()
+result.read(iprot)
+iprot.readMessageEnd()
+if result.ex is not None:
+  raise result.ex
+return
+
+  def fetchOne(self):
+self.send_fetchOne()
+return self.recv_fetchOne()
+
+  def send_fetchOne(self):
+self._oprot.writeMessageBegin('fetchOne', TMessageType.CALL, self._seqid)
+args = fetchOne_args()
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_fetchOne(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = fetchOne_result()
+result.read(iprot)
+iprot.readMessageEnd()
+if result.success is not None:
+  return result.success
+if result.ex is not None:
+  raise result.ex
+raise TApplicationException(TApplicationException.MISSING_RESULT, 
"fetchOne failed: unknown result")
+
+  def fetchN(self, numRows):
+"""
+Parameters:
+ - numRows
+"""
+self.send_fetchN(numRows)
+return self.recv_fetchN()
+
+  def send_fetchN(self, numRows):
+self._oprot.writeMessageBegin('fetchN', TMessageType.CALL, self._seqid)
+args = fetchN_args()
+args.numRows = numRows
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_fetchN(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = fetchN_result()
+result.read(iprot)
+iprot.readMessageEnd()
+if result.success is not None:
+  return result.success
+if result.ex is not None:
+  raise result.ex
+raise TApplicationException(TApplicationException.MISSING_RESULT, "fetchN 
failed: unknown result")
+
+  def fetchAll(self):
+self.send_fetchAll()
+return self.recv_fetchAll()
+
+  def send_fetchAll(self):
+self._oprot.writeMessageBegin('fetchAll', TMessageType.CALL, self._seqid)
+args = fetchAll_args()
+args.write(self._oprot)
+self._oprot.writeMessageEnd()
+self._oprot.trans.flush()
+
+  def recv_fetchAll(self):
+iprot = self._iprot
+(fname, mtype, rseqid) = iprot.readMessageBegin()
+if mtype == TMessageType.EXCEPTION:
+  x = TApplicationException()
+  x.read(iprot)
+  iprot.readMessageEnd()
+  raise x
+result = fetchAll_result()
+result.read(iprot)
+iprot.readMessageEnd()
+if result.success is not None:
+ 
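The recv_* methods above show how the generated clients map failures: a TApplicationException read off the wire is re-raised as-is, while a HiveServerException carried in the result struct is raised back to the caller. From Java the same behaviour is simply two catch clauses; the sketch below assumes an already-connected ThriftHive.Client and a hypothetical query.

import org.apache.hadoop.hive.service.HiveServerException;
import org.apache.hadoop.hive.service.ThriftHive;
import org.apache.thrift.TException;

public final class QueryErrorHandlingExample {

  static void runQuery(ThriftHive.Client client, String query) {
    try {
      client.execute(query);                       // server-side failures arrive as HiveServerException
    } catch (HiveServerException e) {
      System.err.println("query failed on the server: " + e.getMessage());
    } catch (TException e) {
      System.err.println("RPC/transport failure: " + e.getMessage());
    }
  }
}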

[19/24] hive git commit: HIVE-13388 : Fix inconsistent content due to Thrift changes (Wei Zheng, reviewed by Sergey Shelukhin)

2016-04-04 Thread sseth
http://git-wip-us.apache.org/repos/asf/hive/blob/98303635/service/src/gen/thrift/gen-cpp/ThriftHive.h
--
diff --git a/service/src/gen/thrift/gen-cpp/ThriftHive.h 
b/service/src/gen/thrift/gen-cpp/ThriftHive.h
new file mode 100644
index 000..902bd4b
--- /dev/null
+++ b/service/src/gen/thrift/gen-cpp/ThriftHive.h
@@ -0,0 +1,1224 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#ifndef ThriftHive_H
+#define ThriftHive_H
+
+#include 
+#include 
+#include "hive_service_types.h"
+#include "ThriftHiveMetastore.h"
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+#ifdef _WIN32
+  #pragma warning( push )
+  #pragma warning (disable : 4250 ) //inheriting methods via dominance 
+#endif
+
+class ThriftHiveIf : virtual public  
::Apache::Hadoop::Hive::ThriftHiveMetastoreIf {
+ public:
+  virtual ~ThriftHiveIf() {}
+  virtual void execute(const std::string& query) = 0;
+  virtual void fetchOne(std::string& _return) = 0;
+  virtual void fetchN(std::vector<std::string> & _return, const int32_t 
numRows) = 0;
+  virtual void fetchAll(std::vector<std::string> & _return) = 0;
+  virtual void getSchema( ::Apache::Hadoop::Hive::Schema& _return) = 0;
+  virtual void getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return) = 0;
+  virtual void getClusterStatus(HiveClusterStatus& _return) = 0;
+  virtual void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return) = 0;
+  virtual void clean() = 0;
+};
+
+class ThriftHiveIfFactory : virtual public  
::Apache::Hadoop::Hive::ThriftHiveMetastoreIfFactory {
+ public:
+  typedef ThriftHiveIf Handler;
+
+  virtual ~ThriftHiveIfFactory() {}
+
+  virtual ThriftHiveIf* getHandler(const ::apache::thrift::TConnectionInfo& 
connInfo) = 0;
+  virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* 
handler */) = 0;
+};
+
+class ThriftHiveIfSingletonFactory : virtual public ThriftHiveIfFactory {
+ public:
+  ThriftHiveIfSingletonFactory(const boost::shared_ptr<ThriftHiveIf>& iface) : 
iface_(iface) {}
+  virtual ~ThriftHiveIfSingletonFactory() {}
+
+  virtual ThriftHiveIf* getHandler(const ::apache::thrift::TConnectionInfo&) {
+return iface_.get();
+  }
+  virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* 
handler */) {}
+
+ protected:
+  boost::shared_ptr<ThriftHiveIf> iface_;
+};
+
+class ThriftHiveNull : virtual public ThriftHiveIf , virtual public  
::Apache::Hadoop::Hive::ThriftHiveMetastoreNull {
+ public:
+  virtual ~ThriftHiveNull() {}
+  void execute(const std::string& /* query */) {
+return;
+  }
+  void fetchOne(std::string& /* _return */) {
+return;
+  }
+  void fetchN(std::vector<std::string> & /* _return */, const int32_t /* 
numRows */) {
+return;
+  }
+  void fetchAll(std::vector<std::string> & /* _return */) {
+return;
+  }
+  void getSchema( ::Apache::Hadoop::Hive::Schema& /* _return */) {
+return;
+  }
+  void getThriftSchema( ::Apache::Hadoop::Hive::Schema& /* _return */) {
+return;
+  }
+  void getClusterStatus(HiveClusterStatus& /* _return */) {
+return;
+  }
+  void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& /* _return */) {
+return;
+  }
+  void clean() {
+return;
+  }
+};
+
+typedef struct _ThriftHive_execute_args__isset {
+  _ThriftHive_execute_args__isset() : query(false) {}
+  bool query :1;
+} _ThriftHive_execute_args__isset;
+
+class ThriftHive_execute_args {
+ public:
+
+  ThriftHive_execute_args(const ThriftHive_execute_args&);
+  ThriftHive_execute_args& operator=(const ThriftHive_execute_args&);
+  ThriftHive_execute_args() : query() {
+  }
+
+  virtual ~ThriftHive_execute_args() throw();
+  std::string query;
+
+  _ThriftHive_execute_args__isset __isset;
+
+  void __set_query(const std::string& val);
+
+  bool operator == (const ThriftHive_execute_args & rhs) const
+  {
+if (!(query == rhs.query))
+  return false;
+return true;
+  }
+  bool operator != (const ThriftHive_execute_args &rhs) const {
+return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHive_execute_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHive_execute_pargs {
+ public:
+
+
+  virtual ~ThriftHive_execute_pargs() throw();
+  const std::string* query;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHive_execute_result__isset {
+  _ThriftHive_execute_result__isset() : ex(false) {}
+  bool ex :1;
+} _ThriftHive_execute_result__isset;
+
+class ThriftHive_execute_result {
+ public:
+
+  ThriftHive_execute_result(const ThriftHive_execute_result&);
+  ThriftHive_execute_result& operator=(const ThriftHive_execute_result&);
+  ThriftHive_execute_result() {
+  }
+
+  virtual ~ThriftHive_execute_result() throw();
+  HiveServerException ex;
+
+  _ThriftHive_execute_result__isset __isset;
+
+  void __set_ex(const