This is an automated email from the ASF dual-hosted git repository.
iwasakims pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/bigtop.git
The following commit(s) were added to refs/heads/master by this push:
new 249fab0 BIGTOP-3480. Bump Hive to 3.1.2. (#718)
249fab0 is described below
commit 249fab074f5fae0164f5dca977802f50c43f8803
Author: Kengo Seki <[email protected]>
AuthorDate: Fri Feb 5 20:03:20 2021 +0900
BIGTOP-3480. Bump Hive to 3.1.2. (#718)
---
bigtop-packages/src/common/hive/do-component-build | 4 +-
.../src/common/hive/patch0-HIVE-16302.diff | 16 -
.../src/common/hive/patch0-HIVE-22779.diff | 98 ++++
.../src/common/hive/patch1-HIVE-22839.diff | 28 --
.../src/common/hive/patch1-HIVE-23190.diff | 43 ++
.../src/common/hive/patch2-HIVE-18436.diff | 73 ---
.../src/common/hive/patch2-HIVE-20201.diff | 41 ++
.../src/common/hive/patch3-HIVE-21569.diff | 257 ++++++++++
.../src/common/hive/patch3-HIVE-23303.diff | 37 --
.../hive/patch4-HIVE-16402-hadoop-2.10.0.diff | 251 ----------
.../src/common/hive/patch4-HIVE-19316.diff | 533 +++++++++++++++++++++
.../src/common/hive/patch5-HIVE-17368.diff | 293 -----------
bigtop-packages/src/deb/hive/rules | 3 -
bigtop.bom | 4 +-
14 files changed, 977 insertions(+), 704 deletions(-)
diff --git a/bigtop-packages/src/common/hive/do-component-build b/bigtop-packages/src/common/hive/do-component-build
index ea1b4d9..2674d7c 100644
--- a/bigtop-packages/src/common/hive/do-component-build
+++ b/bigtop-packages/src/common/hive/do-component-build
@@ -23,7 +23,9 @@ HIVE_MAVEN_OPTS=" -Dhbase.version=$HBASE_VERSION \
-Dhadoop.version=$HADOOP_VERSION \
-DskipTests \
-Dtez.version=${TEZ_VERSION} \
--Dspark.version=${SPARK_VERSION}
+-Dspark.version=${SPARK_VERSION} \
+-Dscala.binary.version=${SCALA_VERSION%.*} \
+-Dscala.version=${SCALA_VERSION}
"
# Include common Maven Deployment logic
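(For reference: ${SCALA_VERSION%.*} is POSIX shell parameter expansion that removes the shortest trailing suffix matching ".*", so a hypothetical SCALA_VERSION of 2.11.8 expands to 2.11 for scala.binary.version.)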
diff --git a/bigtop-packages/src/common/hive/patch0-HIVE-16302.diff b/bigtop-packages/src/common/hive/patch0-HIVE-16302.diff
deleted file mode 100644
index 96cd99e..0000000
--- a/bigtop-packages/src/common/hive/patch0-HIVE-16302.diff
+++ /dev/null
@@ -1,16 +0,0 @@
-diff --git a/shims/common/pom.xml b/shims/common/pom.xml
-index 19821cd..e2245df 100644
---- a/shims/common/pom.xml
-+++ b/shims/common/pom.xml
-@@ -86,6 +86,11 @@
- </exclusion>
- </exclusions>
- </dependency>
-+ <dependency>
-+ <groupId>junit</groupId>
-+ <artifactId>junit</artifactId>
-+ <scope>test</scope>
-+ </dependency>
- </dependencies>
-
- <build>
diff --git a/bigtop-packages/src/common/hive/patch0-HIVE-22779.diff b/bigtop-packages/src/common/hive/patch0-HIVE-22779.diff
new file mode 100644
index 0000000..7e55c8c
--- /dev/null
+++ b/bigtop-packages/src/common/hive/patch0-HIVE-22779.diff
@@ -0,0 +1,98 @@
+diff --git a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUtils.java
+index 4add29027d..130d98a144 100644
+--- a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUtils.java
++++ b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUtils.java
+@@ -26,12 +26,12 @@
+ import java.lang.reflect.Modifier;
+ import java.util.Set;
+
++import com.google.common.collect.Sets;
+ import org.apache.commons.lang.reflect.FieldUtils;
+ import org.apache.hadoop.hive.conf.HiveConf;
+ import org.apache.hadoop.hive.ql.plan.ColStatistics.Range;
+ import org.apache.hadoop.hive.serde.serdeConstants;
+ import org.junit.Test;
+-import org.spark_project.guava.collect.Sets;
+
+ public class TestStatsUtils {
+
+diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/metrics/ShuffleWriteMetrics.java b/spark-client/src/main/java/org/apache/hive/spark/client/metrics/ShuffleWriteMetrics.java
+index 64a4b86042..d27b1700cd 100644
+--- a/spark-client/src/main/java/org/apache/hive/spark/client/metrics/ShuffleWriteMetrics.java
++++ b/spark-client/src/main/java/org/apache/hive/spark/client/metrics/ShuffleWriteMetrics.java
+@@ -47,8 +47,8 @@ public ShuffleWriteMetrics(
+ }
+
+ public ShuffleWriteMetrics(TaskMetrics metrics) {
+- this(metrics.shuffleWriteMetrics().shuffleBytesWritten(),
+- metrics.shuffleWriteMetrics().shuffleWriteTime());
++ this(metrics.shuffleWriteMetrics().bytesWritten(),
++ metrics.shuffleWriteMetrics().writeTime());
+ }
+
+ }
+diff --git a/spark-client/src/main/java/org/apache/hive/spark/counter/SparkCounter.java b/spark-client/src/main/java/org/apache/hive/spark/counter/SparkCounter.java
+index d0eb1fa446..bb494b6fb4 100644
+--- a/spark-client/src/main/java/org/apache/hive/spark/counter/SparkCounter.java
++++ b/spark-client/src/main/java/org/apache/hive/spark/counter/SparkCounter.java
+@@ -19,15 +19,14 @@
+
+ import java.io.Serializable;
+
+-import org.apache.spark.Accumulator;
+-import org.apache.spark.AccumulatorParam;
+ import org.apache.spark.api.java.JavaSparkContext;
++import org.apache.spark.util.LongAccumulator;
+
+ public class SparkCounter implements Serializable {
+
+ private String name;
+ private String displayName;
+- private Accumulator<Long> accumulator;
++ private LongAccumulator accumulator;
+
+ // Values of accumulators can only be read on the SparkContext side. This field is used when
+ // creating a snapshot to be sent to the RSC client.
+@@ -55,9 +54,16 @@ public SparkCounter(
+
+ this.name = name;
+ this.displayName = displayName;
+- LongAccumulatorParam longParam = new LongAccumulatorParam();
+ String accumulatorName = groupName + "_" + name;
+- this.accumulator = sparkContext.accumulator(initValue, accumulatorName, longParam);
++ this.accumulator = createAccumulator(sparkContext, accumulatorName, initValue);
++ }
++
++ private LongAccumulator createAccumulator(
++ JavaSparkContext sparkContext, String accumulatorName, long initValue) {
++ LongAccumulator accumulator = new LongAccumulator();
++ accumulator.setValue(initValue);
++ sparkContext.sc().register(accumulator, accumulatorName);
++ return accumulator;
+ }
+
+ public long getValue() {
+@@ -87,23 +93,4 @@ public void setDisplayName(String displayName) {
+ SparkCounter snapshot() {
+ return new SparkCounter(name, displayName, accumulator.value());
+ }
+-
+- class LongAccumulatorParam implements AccumulatorParam<Long> {
+-
+- @Override
+- public Long addAccumulator(Long t1, Long t2) {
+- return t1 + t2;
+- }
+-
+- @Override
+- public Long addInPlace(Long r1, Long r2) {
+- return r1 + r2;
+- }
+-
+- @Override
+- public Long zero(Long initialValue) {
+- return 0L;
+- }
+- }
+-
+ }
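HIVE-22779 above ports spark-client from the Spark 1.x Accumulator/AccumulatorParam API to the Spark 2.x AccumulatorV2 API. A minimal standalone sketch of the replacement pattern, assuming a local Spark 2.x context and a hypothetical accumulator name "counts":

    import java.util.Arrays;

    import org.apache.spark.SparkConf;
    import org.apache.spark.api.java.JavaSparkContext;
    import org.apache.spark.util.LongAccumulator;

    public class AccumulatorSketch {
      public static void main(String[] args) {
        JavaSparkContext jsc = new JavaSparkContext(
            new SparkConf().setMaster("local").setAppName("accumulator-sketch"));
        // AccumulatorV2 style, as in the patched SparkCounter: construct,
        // seed the initial value, then register the accumulator by name.
        LongAccumulator acc = new LongAccumulator();
        acc.setValue(0L);
        jsc.sc().register(acc, "counts");
        jsc.parallelize(Arrays.asList(1, 2, 3)).foreach(x -> acc.add(1L));
        System.out.println(acc.value()); // values are only readable on the driver side
        jsc.stop();
      }
    }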
diff --git a/bigtop-packages/src/common/hive/patch1-HIVE-22839.diff b/bigtop-packages/src/common/hive/patch1-HIVE-22839.diff
deleted file mode 100644
index e2f75e7..0000000
--- a/bigtop-packages/src/common/hive/patch1-HIVE-22839.diff
+++ /dev/null
@@ -1,28 +0,0 @@
-diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java
-index 784648a..83c7781 100644
---- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java
-+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/MockUtils.java
-@@ -29,6 +29,8 @@
- import org.apache.hadoop.hbase.client.Result;
- import org.apache.hadoop.hbase.client.ResultScanner;
- import org.apache.hadoop.hbase.client.Scan;
-+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-+
- import org.apache.hadoop.hive.conf.HiveConf;
- import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
- import org.apache.hadoop.hive.metastore.FileFormatProxy;
-@@ -157,6 +159,14 @@ public void close() {
- public Iterator<Result> iterator() {
- return iter;
- }
-+
-+ public ScanMetrics getScanMetrics() {
-+ return null;
-+ }
-+
-+ public boolean renewLease() {
-+ return true;
-+ }
- };
- }
- });
diff --git a/bigtop-packages/src/common/hive/patch1-HIVE-23190.diff b/bigtop-packages/src/common/hive/patch1-HIVE-23190.diff
new file mode 100644
index 0000000..0eea57d
--- /dev/null
+++ b/bigtop-packages/src/common/hive/patch1-HIVE-23190.diff
@@ -0,0 +1,43 @@
+diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/IndexCache.java b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/IndexCache.java
+index 4de03f232d..d8c18776ec 100644
+--- a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/IndexCache.java
++++ b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/IndexCache.java
+@@ -25,6 +25,7 @@
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.tez.runtime.library.common.Constants;
+ import org.apache.tez.runtime.library.common.sort.impl.TezIndexRecord;
+@@ -43,10 +44,21 @@
+ private final LinkedBlockingQueue<String> queue =
+ new LinkedBlockingQueue<String>();
+
++ private FileSystem fs;
++
+ public IndexCache(Configuration conf) {
+ this.conf = conf;
+ totalMemoryAllowed = 10 * 1024 * 1024;
+ LOG.info("IndexCache created with max memory = " + totalMemoryAllowed);
++ initLocalFs();
++ }
++
++ private void initLocalFs() {
++ try {
++ this.fs = FileSystem.getLocal(conf).getRaw();
++ } catch (IOException e) {
++ throw new RuntimeException(e);
++ }
+ }
+
+ /**
+@@ -118,7 +130,7 @@ private IndexInformation readIndexFileToCache(Path indexFileName,
+ LOG.debug("IndexCache MISS: MapId " + mapId + " not found") ;
+ TezSpillRecord tmp = null;
+ try {
+- tmp = new TezSpillRecord(indexFileName, conf, expectedIndexOwner);
++ tmp = new TezSpillRecord(indexFileName, fs, expectedIndexOwner);
+ } catch (Throwable e) {
+ tmp = new TezSpillRecord(0);
+ cache.remove(mapId);
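HIVE-23190 above has IndexCache resolve the raw local filesystem once in its constructor and pass it to TezSpillRecord, rather than handing over a Configuration. A minimal sketch of that lookup, assuming only a default Hadoop Configuration:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class LocalFsSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // getRaw() returns the local filesystem without its checksumming
        // wrapper; caching it avoids re-resolving it on every index read.
        FileSystem fs = FileSystem.getLocal(conf).getRaw();
        System.out.println(fs.getUri());
      }
    }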
diff --git a/bigtop-packages/src/common/hive/patch2-HIVE-18436.diff b/bigtop-packages/src/common/hive/patch2-HIVE-18436.diff
deleted file mode 100644
index 442629d..0000000
--- a/bigtop-packages/src/common/hive/patch2-HIVE-18436.diff
+++ /dev/null
@@ -1,73 +0,0 @@
-diff --git a/pom.xml b/pom.xml
-index f91f7f43a6..71a889746a 100644
---- a/pom.xml
-+++ b/pom.xml
-@@ -178,7 +178,7 @@
- <orc.version>1.3.4</orc.version>
- <mockito-all.version>1.9.5</mockito-all.version>
- <mina.version>2.0.0-M5</mina.version>
-- <netty.version>4.0.52.Final</netty.version>
-+ <netty.version>4.1.17.Final</netty.version>
- <parquet.version>1.8.1</parquet.version>
- <pig.version>0.16.0</pig.version>
- <protobuf.version>2.5.0</protobuf.version>
-@@ -189,7 +189,7 @@
- <tez.version>0.8.4</tez.version>
- <slider.version>0.90.2-incubating</slider.version>
- <super-csv.version>2.2.0</super-csv.version>
-- <spark.version>2.0.0</spark.version>
-+ <spark.version>2.3.0</spark.version>
- <scala.binary.version>2.11</scala.binary.version>
- <scala.version>2.11.8</scala.version>
- <tempus-fugit.version>1.1</tempus-fugit.version>
-diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/LocalHiveSparkClient.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/LocalHiveSparkClient.java
-index beeafd0672..c871ba6c33 100644
---- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/LocalHiveSparkClient.java
-+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/LocalHiveSparkClient.java
-@@ -84,7 +84,7 @@ public static synchronized LocalHiveSparkClient getInstance(SparkConf sparkConf)
- private LocalHiveSparkClient(SparkConf sparkConf) {
- sc = new JavaSparkContext(sparkConf);
- jobMetricsListener = new JobMetricsListener();
-- sc.sc().listenerBus().addListener(jobMetricsListener);
-+ sc.sc().addSparkListener(jobMetricsListener);
- }
-
- @Override
-diff --git a/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestKryoMessageCodec.java b/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestKryoMessageCodec.java
-index 24858d7cef..fb736471b2 100644
---- a/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestKryoMessageCodec.java
-+++ b/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestKryoMessageCodec.java
-@@ -72,7 +72,8 @@ public void testEmbeddedChannel() throws Exception {
- c.writeAndFlush(MESSAGE);
- assertEquals(1, c.outboundMessages().size());
- assertFalse(MESSAGE.getClass().equals(c.outboundMessages().peek().getClass()));
-- c.writeInbound(c.readOutbound());
-+ Object readOutboundResult = c.readOutbound();
-+ c.writeInbound(readOutboundResult);
- assertEquals(1, c.inboundMessages().size());
- assertEquals(MESSAGE, c.readInbound());
- c.close();
-diff --git a/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java b/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java
-index 5a4801c5fa..21b3d4e494 100644
---- a/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java
-+++ b/spark-client/src/test/java/org/apache/hive/spark/client/rpc/TestRpc.java
-@@ -287,7 +287,8 @@ private void transfer(Rpc serverRpc, Rpc clientRpc) {
-
- int count = 0;
- while (!client.outboundMessages().isEmpty()) {
-- server.writeInbound(client.readOutbound());
-+ Object readOutboundResult = client.readOutbound();
-+ server.writeInbound(readOutboundResult);
- count++;
- }
- server.flush();
-@@ -295,7 +296,8 @@ private void transfer(Rpc serverRpc, Rpc clientRpc) {
-
- count = 0;
- while (!server.outboundMessages().isEmpty()) {
-- client.writeInbound(server.readOutbound());
-+ Object readOutboundResult = server.readOutbound();
-+ client.writeInbound(readOutboundResult);
- count++;
- }
- client.flush();
diff --git a/bigtop-packages/src/common/hive/patch2-HIVE-20201.diff b/bigtop-packages/src/common/hive/patch2-HIVE-20201.diff
new file mode 100644
index 0000000..184c952
--- /dev/null
+++ b/bigtop-packages/src/common/hive/patch2-HIVE-20201.diff
@@ -0,0 +1,41 @@
+From 432c24559bf9f99fe6a8adc619c75f72348a79f1 Mon Sep 17 00:00:00 2001
+From: Mike Drob <[email protected]>
+Date: Wed, 18 Jul 2018 09:24:59 -0500
+Subject: [PATCH] HIVE-20201 Use Apache Commons Base64
+
+---
+ .../apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java
+index aedadc29b8..77dff5cf29 100644
+--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java
++++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableSnapshotInputFormat.java
+@@ -18,6 +18,7 @@
+
+ package org.apache.hadoop.hive.hbase;
+
++import org.apache.commons.codec.binary.Base64;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hbase.client.Result;
+ import org.apache.hadoop.hbase.client.Scan;
+@@ -26,7 +27,6 @@
+ import org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat;
+ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+-import org.apache.hadoop.hbase.util.Base64;
+ import org.apache.hadoop.hbase.util.Bytes;
+ import org.apache.hadoop.mapred.FileInputFormat;
+ import org.apache.hadoop.mapred.InputFormat;
+@@ -54,7 +54,7 @@ private static void setColumns(JobConf job) throws IOException {
+ // Copied from HBase's TableMapreduceUtil since it is not public API
+ static String convertScanToString(Scan scan) throws IOException {
+ ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
+- return Base64.encodeBytes(proto.toByteArray());
++ return Base64.encodeBase64String(proto.toByteArray());
+ }
+
+ @Override
+--
+2.16.1
+
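HIVE-20201 above replaces HBase's bundled org.apache.hadoop.hbase.util.Base64 helper with Apache Commons Codec. A minimal sketch of the replacement round trip, using a made-up payload in place of proto.toByteArray():

    import java.nio.charset.StandardCharsets;

    import org.apache.commons.codec.binary.Base64;

    public class Base64Sketch {
      public static void main(String[] args) {
        byte[] payload = "serialized-scan".getBytes(StandardCharsets.UTF_8);
        String encoded = Base64.encodeBase64String(payload); // replaces Base64.encodeBytes(...)
        byte[] decoded = Base64.decodeBase64(encoded);
        System.out.println(encoded);
        System.out.println(new String(decoded, StandardCharsets.UTF_8));
      }
    }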
diff --git a/bigtop-packages/src/common/hive/patch3-HIVE-21569.diff b/bigtop-packages/src/common/hive/patch3-HIVE-21569.diff
new file mode 100644
index 0000000..b893be9
--- /dev/null
+++ b/bigtop-packages/src/common/hive/patch3-HIVE-21569.diff
@@ -0,0 +1,257 @@
+diff --git a/druid-handler/pom.xml b/druid-handler/pom.xml
+index 27f8a9bd9c..bf39f6acfc 100644
+--- a/druid-handler/pom.xml
++++ b/druid-handler/pom.xml
+@@ -30,7 +30,7 @@
+ <properties>
+ <hive.path.to.root>..</hive.path.to.root>
+ <druid.metamx.util.version>1.3.2</druid.metamx.util.version>
+- <druid.guava.version>16.0.1</druid.guava.version>
++ <druid.guava.version>28.1-jre</druid.guava.version>
+ </properties>
+
+ <dependencies>
+diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidScanQueryRecordReader.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidScanQueryRecordReader.java
+index 64c640f45a..6b9f08cd8a 100644
+--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidScanQueryRecordReader.java
++++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidScanQueryRecordReader.java
+@@ -28,6 +28,7 @@
+ import com.google.common.collect.Iterators;
+
+ import java.io.IOException;
++import java.util.Collections;
+ import java.util.Iterator;
+ import java.util.List;
+
+@@ -43,7 +44,7 @@
+
+ private ScanResultValue current;
+
+- private Iterator<List<Object>> compactedValues = Iterators.emptyIterator();
++ private Iterator<List<Object>> compactedValues = Collections.emptyIterator();
+
+ @Override
+ protected JavaType getResultTypeDef() {
+diff --git a/itests/qtest-druid/pom.xml b/itests/qtest-druid/pom.xml
+index 8b20ac342f..ecefff3e52 100644
+--- a/itests/qtest-druid/pom.xml
++++ b/itests/qtest-druid/pom.xml
+@@ -41,7 +41,7 @@
+ <druid.jersey.version>1.19.3</druid.jersey.version>
+ <druid.jetty.version>9.3.19.v20170502</druid.jetty.version>
+ <druid.derby.version>10.11.1.1</druid.derby.version>
+- <druid.guava.version>16.0.1</druid.guava.version>
++ <druid.guava.version>28.1-jre</druid.guava.version>
+ <druid.guice.version>4.1.0</druid.guice.version>
+ <kafka.version>0.10.2.0</kafka.version>
+ </properties>
+diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/AsyncPbRpcProxy.java b/llap-common/src/java/org/apache/hadoop/hive/llap/AsyncPbRpcProxy.java
+index ad39963614..475a159e3f 100644
+--- a/llap-common/src/java/org/apache/hadoop/hive/llap/AsyncPbRpcProxy.java
++++ b/llap-common/src/java/org/apache/hadoop/hive/llap/AsyncPbRpcProxy.java
+@@ -171,7 +171,7 @@ public void shutdown() {
+ CallableRequest<T, U> request, LlapNodeId nodeId) {
+ ListenableFuture<U> future = executor.submit(request);
+ Futures.addCallback(future, new ResponseCallback<U>(
+- request.getCallback(), nodeId, this));
++ request.getCallback(), nodeId, this), MoreExecutors.directExecutor());
+ }
+
+ @VisibleForTesting
+@@ -283,7 +283,7 @@ public void onFailure(Throwable t) {
+ LOG.warn("RequestManager shutdown with error", t);
+ }
+ }
+- });
++ }, MoreExecutors.directExecutor());
+ }
+
+ @Override
+@@ -490,4 +490,4 @@ protected abstract ProtocolType createProtocolImpl(Configuration config, String
+ protected abstract void shutdownProtocolImpl(ProtocolType proxy);
+
+ protected abstract String getTokenUser(Token<TokenType> token);
+-}
+\ No newline at end of file
++}
+diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
+index 088a5f33c0..a49920eeac 100644
+--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
++++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/AMReporter.java
+@@ -174,7 +174,7 @@ public void onFailure(Throwable t) {
+ Thread.getDefaultUncaughtExceptionHandler().uncaughtException(Thread.currentThread(), t);
+ }
+ }
+- });
++ }, MoreExecutors.directExecutor());
+ // TODO: why is this needed? we could just save the host and port?
+ nodeId = LlapNodeId.getInstance(localAddress.get().getHostName(), localAddress.get().getPort());
+ LOG.info("AMReporter running with DaemonId: {}, NodeId: {}", daemonId, nodeId);
+@@ -274,7 +274,7 @@ public void onFailure(Throwable t) {
+ LOG.warn("Failed to send taskKilled for {}. The attempt will likely time out.",
+ taskAttemptId);
+ }
+- });
++ }, MoreExecutors.directExecutor());
+ }
+
+ public void queryComplete(QueryIdentifier queryIdentifier) {
+@@ -342,7 +342,7 @@ public void onFailure(Throwable t) {
+ amNodeInfo.amNodeId, currentQueryIdentifier, t);
+ queryFailedHandler.queryFailed(currentQueryIdentifier);
+ }
+- });
++ }, MoreExecutors.directExecutor());
+ }
+ }
+ } catch (InterruptedException e) {
+diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
+index 33ade55ee1..848c42a750 100644
+--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
++++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
+@@ -128,7 +128,7 @@ public synchronized void registerTask(RuntimeTask task,
+ sendCounterInterval, maxEventsToGet, requestCounter, containerIdStr, initialEvent,
+ fragmentRequestId, wmCounters);
+ ListenableFuture<Boolean> future = heartbeatExecutor.submit(currentCallable);
+- Futures.addCallback(future, new HeartbeatCallback(errorReporter));
++ Futures.addCallback(future, new HeartbeatCallback(errorReporter), MoreExecutors.directExecutor());
+ }
+
+ /**
+diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+index 047a55ccae..fac0f77c6a 100644
+--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
++++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+@@ -175,7 +175,7 @@ public TaskExecutorService(int numExecutors, int waitQueueSize,
+ executionCompletionExecutorService = MoreExecutors.listeningDecorator(
+ executionCompletionExecutorServiceRaw);
+ ListenableFuture<?> future = waitQueueExecutorService.submit(new WaitQueueWorker());
+- Futures.addCallback(future, new WaitQueueWorkerCallback());
++ Futures.addCallback(future, new WaitQueueWorkerCallback(), MoreExecutors.directExecutor());
+ }
+
+ private LlapQueueComparatorBase createComparator(
+diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
+index 82179645da..b20e8cda65 100644
+--- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
++++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java
+@@ -744,15 +744,17 @@ public Void call() throws Exception {
+ }, 10000L, TimeUnit.MILLISECONDS);
+
+ nodeEnablerFuture = nodeEnabledExecutor.submit(nodeEnablerCallable);
+- Futures.addCallback(nodeEnablerFuture, new LoggingFutureCallback("NodeEnablerThread", LOG));
++ Futures.addCallback(nodeEnablerFuture, new LoggingFutureCallback("NodeEnablerThread", LOG),
++ MoreExecutors.directExecutor());
+
+ delayedTaskSchedulerFuture =
+ delayedTaskSchedulerExecutor.submit(delayedTaskSchedulerCallable);
+ Futures.addCallback(delayedTaskSchedulerFuture,
+- new LoggingFutureCallback("DelayedTaskSchedulerThread", LOG));
++ new LoggingFutureCallback("DelayedTaskSchedulerThread", LOG), MoreExecutors.directExecutor());
+
+ schedulerFuture = schedulerExecutor.submit(schedulerCallable);
+- Futures.addCallback(schedulerFuture, new LoggingFutureCallback("SchedulerThread", LOG));
++ Futures.addCallback(schedulerFuture, new LoggingFutureCallback("SchedulerThread", LOG),
++ MoreExecutors.directExecutor());
+
+ registry.start();
+ registry.registerStateChangeListener(new NodeStateChangeListener());
+diff --git a/pom.xml b/pom.xml
+index c06f7f81c1..17dd2cf886 100644
+--- a/pom.xml
++++ b/pom.xml
+@@ -144,7 +144,7 @@
+ <dropwizard-metrics-hadoop-metrics2-reporter.version>0.1.2</dropwizard-metrics-hadoop-metrics2-reporter.version>
+ <druid.version>0.12.0</druid.version>
+ <flatbuffers.version>1.2.0-3f79e055</flatbuffers.version>
+- <guava.version>19.0</guava.version>
++ <guava.version>28.1-jre</guava.version>
+ <groovy.version>2.4.11</groovy.version>
+ <h2database.version>1.3.166</h2database.version>
+ <hadoop.version>3.1.0</hadoop.version>
+diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
+index 97ba036335..649ef99d9b 100644
+--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
++++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
+@@ -27,6 +27,7 @@
+ import com.google.common.util.concurrent.FutureCallback;
+ import com.google.common.util.concurrent.Futures;
+ import com.google.common.util.concurrent.ListenableFuture;
++import com.google.common.util.concurrent.MoreExecutors;
+ import com.google.common.util.concurrent.SettableFuture;
+ import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+@@ -1092,7 +1093,7 @@ private static int transferSessionsToDestroy(Collection<WmTezSession> source,
+ }
+
+ private void failOnFutureFailure(ListenableFuture<?> future) {
+- Futures.addCallback(future, FATAL_ERROR_CALLBACK);
++ Futures.addCallback(future, FATAL_ERROR_CALLBACK, MoreExecutors.directExecutor());
+ }
+
+ private void queueGetRequestOnMasterThread(
+@@ -1925,7 +1926,7 @@ public SessionInitContext(SettableFuture<WmTezSession> future,
+
+ public void start() throws Exception {
+ ListenableFuture<WmTezSession> getFuture = tezAmPool.getSessionAsync();
+- Futures.addCallback(getFuture, this);
++ Futures.addCallback(getFuture, this, MoreExecutors.directExecutor());
+ }
+
+ @Override
+@@ -1979,7 +1980,7 @@ public void onSuccess(WmTezSession session) {
+ case GETTING: {
+ ListenableFuture<WmTezSession> waitFuture = session.waitForAmRegistryAsync(
+ amRegistryTimeoutMs, timeoutPool);
+- Futures.addCallback(waitFuture, this);
++ Futures.addCallback(waitFuture, this, MoreExecutors.directExecutor());
+ break;
+ }
+ case WAITING_FOR_REGISTRY: {
+diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/SampleTezSessionState.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/SampleTezSessionState.java
+index f5ab981f26..d4296b9b43 100644
+--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/SampleTezSessionState.java
++++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/SampleTezSessionState.java
+@@ -22,6 +22,7 @@
+ import com.google.common.util.concurrent.Futures;
+ import com.google.common.util.concurrent.FutureCallback;
+ import com.google.common.util.concurrent.ListenableFuture;
++import com.google.common.util.concurrent.MoreExecutors;
+ import com.google.common.util.concurrent.SettableFuture;
+ import java.io.IOException;
+ import java.util.concurrent.ScheduledExecutorService;
+@@ -128,7 +129,7 @@ public void onSuccess(Boolean result) {
+ public void onFailure(Throwable t) {
+ future.setException(t);
+ }
+- });
++ }, MoreExecutors.directExecutor());
+ return future;
+ }
+
+diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
+index 884dbfdf31..bd2e51edeb 100644
+--- a/standalone-metastore/pom.xml
++++ b/standalone-metastore/pom.xml
+@@ -70,7 +70,7 @@
+ <derby.version>10.10.2.0</derby.version>
+ <dropwizard-metrics-hadoop-metrics2-reporter.version>0.1.2</dropwizard-metrics-hadoop-metrics2-reporter.version>
+ <dropwizard.version>3.1.0</dropwizard.version>
+- <guava.version>19.0</guava.version>
++ <guava.version>28.1-jre</guava.version>
+ <hadoop.version>3.1.0</hadoop.version>
+ <hikaricp.version>2.6.1</hikaricp.version>
+ <jackson.version>2.9.5</jackson.version>
+diff --git a/storage-api/pom.xml b/storage-api/pom.xml
+index a40feff575..799e541332 100644
+--- a/storage-api/pom.xml
++++ b/storage-api/pom.xml
+@@ -32,7 +32,7 @@
+ <properties>
+ <commons-lang.version>2.6</commons-lang.version>
+ <commons-logging.version>1.1.3</commons-logging.version>
+- <guava.version>19.0</guava.version>
++ <guava.version>28.1-jre</guava.version>
+ <hadoop.version>3.0.0-beta1</hadoop.version>
+ <junit.version>4.11</junit.version>
+ <slf4j.version>1.7.10</slf4j.version>
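The recurring change in HIVE-21569 above is the third argument to Guava's Futures.addCallback: the Guava release the patch moves to (28.1-jre) no longer offers the two-argument overload, so each call site now names an executor explicitly, typically MoreExecutors.directExecutor(). A minimal sketch of the three-argument form, assuming a throwaway single-thread executor:

    import java.util.concurrent.Executors;

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.common.util.concurrent.MoreExecutors;

    public class CallbackSketch {
      public static void main(String[] args) {
        ListeningExecutorService pool =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        ListenableFuture<Integer> future = pool.submit(() -> 42);
        Futures.addCallback(future, new FutureCallback<Integer>() {
          @Override public void onSuccess(Integer result) { System.out.println("got " + result); }
          @Override public void onFailure(Throwable t) { t.printStackTrace(); }
          // directExecutor() runs the callback on the thread that completes the future
        }, MoreExecutors.directExecutor());
        pool.shutdown();
      }
    }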
diff --git a/bigtop-packages/src/common/hive/patch3-HIVE-23303.diff b/bigtop-packages/src/common/hive/patch3-HIVE-23303.diff
deleted file mode 100644
index d73743f..0000000
--- a/bigtop-packages/src/common/hive/patch3-HIVE-23303.diff
+++ /dev/null
@@ -1,37 +0,0 @@
-diff --git a/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java b/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java
-index 2c37a51cf4..c8122a2d61 100644
---- a/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java
-+++ b/shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java
-@@ -23,6 +23,7 @@
- import java.net.URISyntaxException;
-
- import org.apache.hadoop.conf.Configuration;
-+import org.apache.hadoop.fs.Options.Rename;
- import org.apache.hadoop.fs.permission.FsPermission;
- import org.apache.hadoop.util.Progressable;
- import org.apache.hadoop.util.Shell;
-@@ -180,6 +181,12 @@ public boolean rename(Path src, Path dst) throws IOException {
- return super.isFile(dest) ? false : super.rename(swizzleParamPath(src), dest);
- }
-
-+ @Override
-+ protected void rename(Path src, Path dst, Rename... options)
-+ throws IOException {
-+ super.rename(swizzleParamPath(src), swizzleParamPath(dst), options);
-+ }
-+
- @Override
- public boolean delete(Path f, boolean recursive) throws IOException {
- return super.delete(swizzleParamPath(f), recursive);
-@@ -215,6 +222,11 @@ public Path getWorkingDirectory() {
- return swizzleReturnPath(super.getWorkingDirectory());
- }
-
-+ @Override
-+ public boolean mkdirs(Path f) throws IOException {
-+ return super.mkdirs(swizzleParamPath(f));
-+ }
-+
- @Override
- public boolean mkdirs(Path f, FsPermission permission) throws IOException {
- return super.mkdirs(swizzleParamPath(f), permission);
diff --git a/bigtop-packages/src/common/hive/patch4-HIVE-16402-hadoop-2.10.0.diff b/bigtop-packages/src/common/hive/patch4-HIVE-16402-hadoop-2.10.0.diff
deleted file mode 100644
index c2069ee..0000000
--- a/bigtop-packages/src/common/hive/patch4-HIVE-16402-hadoop-2.10.0.diff
+++ /dev/null
@@ -1,251 +0,0 @@
-diff --git a/hcatalog/core/pom.xml b/hcatalog/core/pom.xml
-index 0cfa07f704..d1513746fe 100644
---- a/hcatalog/core/pom.xml
-+++ b/hcatalog/core/pom.xml
-@@ -191,7 +191,7 @@
- <artifactId>commons-logging</artifactId>
- </exclusion>
- </exclusions>
-- </dependency>
-+ </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-@@ -208,13 +208,19 @@
- <artifactId>commons-logging</artifactId>
- </exclusion>
- </exclusions>
-- </dependency>
-+ </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-yarn-server-tests</artifactId>
- <version>${hadoop.version}</version>
- <classifier>tests</classifier>
- <scope>test</scope>
-+ <exclusions>
-+ <exclusion>
-+ <groupId>org.slf4j</groupId>
-+ <artifactId>slf4j-log4j12</artifactId>
-+ </exclusion>
-+ </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.pig</groupId>
-diff --git a/llap-server/pom.xml b/llap-server/pom.xml
-index 8f70bd7d39..b10f05f82b 100644
---- a/llap-server/pom.xml
-+++ b/llap-server/pom.xml
-@@ -177,6 +177,10 @@
- <artifactId>slider-core</artifactId>
- <version>${slider.version}</version>
- <exclusions>
-+ <exclusion>
-+ <groupId>asm</groupId>
-+ <artifactId>asm</artifactId>
-+ </exclusion>
- <exclusion>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
-@@ -222,8 +226,8 @@
- <artifactId>jettison</artifactId>
- </exclusion>
- <exclusion>
-- <groupId>asm</groupId>
-- <artifactId>asm</artifactId>
-+ <groupId>org.slf4j</groupId>
-+ <artifactId>slf4j-log4j12</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-diff --git a/metastore/pom.xml b/metastore/pom.xml
-index a50bbbc2fd..733f8912f3 100644
---- a/metastore/pom.xml
-+++ b/metastore/pom.xml
-@@ -141,6 +141,22 @@
- <artifactId>antlr-runtime</artifactId>
- <version>${antlr.version}</version>
- </dependency>
-+ <dependency>
-+ <groupId>org.apache.hadoop</groupId>
-+ <artifactId>hadoop-auth</artifactId>
-+ <version>${hadoop.version}</version>
-+ <optional>true</optional>
-+ <exclusions>
-+ <exclusion>
-+ <groupId>org.slf4j</groupId>
-+ <artifactId>slf4j-log4j12</artifactId>
-+ </exclusion>
-+ <exclusion>
-+ <groupId>commmons-logging</groupId>
-+ <artifactId>commons-logging</artifactId>
-+ </exclusion>
-+ </exclusions>
-+ </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
-diff --git a/pom.xml b/pom.xml
-index a73fac0200..a7c462e70e 100644
---- a/pom.xml
-+++ b/pom.xml
-@@ -141,7 +141,7 @@
- <guava.version>14.0.1</guava.version>
- <groovy.version>2.4.4</groovy.version>
- <h2database.version>1.3.166</h2database.version>
-- <hadoop.version>2.7.2</hadoop.version>
-+ <hadoop.version>2.10.0</hadoop.version>
- <hadoop.bin.path>${basedir}/${hive.path.to.root}/testutils/hadoop</hadoop.bin.path>
- <hamcrest.version>1.1</hamcrest.version>
- <hbase.version>1.1.1</hbase.version>
-@@ -684,13 +684,24 @@
- <artifactId>commons-logging</artifactId>
- </exclusion>
- </exclusions>
-- </dependency>
-+ </dependency>
-+ <dependency>
-+ <groupId>org.apache.hadoop</groupId>
-+ <artifactId>hadoop-auth</artifactId>
-+ <version>${hadoop.version}</version>
-+ <exclusions>
-+ <exclusion>
-+ <groupId>commmons-logging</groupId>
-+ <artifactId>commons-logging</artifactId>
-+ </exclusion>
-+ </exclusions>
-+ </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-common</artifactId>
- <version>${hadoop.version}</version>
- <exclusions>
-- <exclusion>
-+ <exclusion>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-log4j12</artifactId>
- </exclusion>
-@@ -730,7 +741,7 @@
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
-- <artifactId>hadoop-mapreduce-client-core</artifactId>
-+ <artifactId>hadoop-mapreduce-client-common</artifactId>
- <version>${hadoop.version}</version>
- <exclusions>
- <exclusion>
-@@ -743,11 +754,56 @@
- </exclusion>
- </exclusions>
- </dependency>
-+ <dependency>
-+ <groupId>org.apache.hadoop</groupId>
-+ <artifactId>hadoop-mapreduce-client-core</artifactId>
-+ <version>${hadoop.version}</version>
-+ <exclusions>
-+ <exclusion>
-+ <groupId>org.slf4j</groupId>
-+ <artifactId>slf4j-log4j12</artifactId>
-+ </exclusion>
-+ <exclusion>
-+ <groupId>commmons-logging</groupId>
-+ <artifactId>commons-logging</artifactId>
-+ </exclusion>
-+ </exclusions>
-+ </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-minikdc</artifactId>
- <version>${hadoop.version}</version>
- </dependency>
-+ <dependency>
-+ <groupId>org.apache.hadoop</groupId>
-+ <artifactId>hadoop-yarn-api</artifactId>
-+ <version>${hadoop.version}</version>
-+ </dependency>
-+ <dependency>
-+ <groupId>org.apache.hadoop</groupId>
-+ <artifactId>hadoop-yarn-client</artifactId>
-+ <version>${hadoop.version}</version>
-+ </dependency>
-+ <dependency>
-+ <groupId>org.apache.hadoop</groupId>
-+ <artifactId>hadoop-yarn-common</artifactId>
-+ <version>${hadoop.version}</version>
-+ </dependency>
-+ <dependency>
-+ <groupId>org.apache.hadoop</groupId>
-+ <artifactId>hadoop-yarn-registry</artifactId>
-+ <version>${hadoop.version}</version>
-+ </dependency>
-+ <dependency>
-+ <groupId>org.apache.hadoop</groupId>
-+ <artifactId>hadoop-yarn-server-web-common</artifactId>
-+ <version>${hadoop.version}</version>
-+ </dependency>
-+ <dependency>
-+ <groupId>org.apache.hadoop</groupId>
-+ <artifactId>hadoop-yarn-server-web-proxy</artifactId>
-+ <version>${hadoop.version}</version>
-+ </dependency>
- <dependency>
- <groupId>org.apache.hbase</groupId>
- <artifactId>hbase-common</artifactId>
-diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
-index 03012df373..580da46277 100644
---- a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
-+++ b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
-@@ -77,7 +77,7 @@ PREHOOK: query: ALTER TABLE default.encrypted_table RENAME TO encrypted_db.encry
- PREHOOK: type: ALTERTABLE_RENAME
- PREHOOK: Input: default@encrypted_table
- PREHOOK: Output: default@encrypted_table
--FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. Alter Table operation for default.encrypted_table failed to move data due to: '/build/ql/test/data/warehouse/encrypted_table can't be moved from encryption zone /build/ql/test/data/warehouse/encrypted_table to encryption zone /build/ql/test/data/warehouse/encrypted_db.db.' See hive log file for details.
-+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. Alter Table operation for default.encrypted_table failed to move data due to: '/build/ql/test/data/warehouse/encrypted_table can't be moved into an encryption zone.' See hive log file for details.
- PREHOOK: query: SHOW TABLES
- PREHOOK: type: SHOWTABLES
- PREHOOK: Input: database:default
-diff --git a/shims/0.23/pom.xml b/shims/0.23/pom.xml
-index f4b580e841..7c586fab98 100644
---- a/shims/0.23/pom.xml
-+++ b/shims/0.23/pom.xml
-@@ -179,6 +179,10 @@
- <groupId>javax.servlet</groupId>
- <artifactId>servlet-api</artifactId>
- </exclusion>
-+ <exclusion>
-+ <groupId>org.slf4j</groupId>
-+ <artifactId>slf4j-log4j12</artifactId>
-+ </exclusion>
- </exclusions>
- </dependency>
- <dependency>
-diff --git a/shims/scheduler/pom.xml b/shims/scheduler/pom.xml
-index 2e868b046f..0eadb69435 100644
---- a/shims/scheduler/pom.xml
-+++ b/shims/scheduler/pom.xml
-@@ -76,6 +76,12 @@
- <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
- <version>${hadoop.version}</version>
- <optional>true</optional>
-+ <exclusions>
-+ <exclusion>
-+ <groupId>org.slf4j</groupId>
-+ <artifactId>slf4j-log4j12</artifactId>
-+ </exclusion>
-+ </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
-diff --git a/storage-api/pom.xml b/storage-api/pom.xml
-index e21bbb6d00..d0bf08813e 100644
---- a/storage-api/pom.xml
-+++ b/storage-api/pom.xml
-@@ -32,7 +32,7 @@
- <properties>
- <commons-lang.version>2.6</commons-lang.version>
- <guava.version>14.0.1</guava.version>
-- <hadoop.version>2.7.2</hadoop.version>
-+ <hadoop.version>2.10.0</hadoop.version>
- <junit.version>4.11</junit.version>
- <slf4j.version>1.7.10</slf4j.version>
- </properties>
diff --git a/bigtop-packages/src/common/hive/patch4-HIVE-19316.diff b/bigtop-packages/src/common/hive/patch4-HIVE-19316.diff
new file mode 100644
index 0000000..457b74c
--- /dev/null
+++ b/bigtop-packages/src/common/hive/patch4-HIVE-19316.diff
@@ -0,0 +1,533 @@
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/ColumnsStatsUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/ColumnsStatsUtils.java
+new file mode 100644
+index 0000000000..2d6d2261f7
+--- /dev/null
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/ColumnsStatsUtils.java
+@@ -0,0 +1,117 @@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements. See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership. The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at
++ *
++ * http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.hive.metastore.columnstats;
++
++import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
++import org.apache.hadoop.hive.metastore.columnstats.cache.DateColumnStatsDataInspector;
++import org.apache.hadoop.hive.metastore.columnstats.cache.DecimalColumnStatsDataInspector;
++import org.apache.hadoop.hive.metastore.columnstats.cache.DoubleColumnStatsDataInspector;
++import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
++import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector;
++
++/**
++ * Utils class for columnstats package.
++ */
++public final class ColumnsStatsUtils {
++
++ private ColumnsStatsUtils(){}
++
++ /**
++ * Convertes to DateColumnStatsDataInspector if it's a DateColumnStatsData.
++ * @param cso ColumnStatisticsObj
++ * @return DateColumnStatsDataInspector
++ */
++ public static DateColumnStatsDataInspector dateInspectorFromStats(ColumnStatisticsObj cso) {
++ DateColumnStatsDataInspector dateColumnStats;
++ if (cso.getStatsData().getDateStats() instanceof DateColumnStatsDataInspector) {
++ dateColumnStats =
++ (DateColumnStatsDataInspector)(cso.getStatsData().getDateStats());
++ } else {
++ dateColumnStats = new DateColumnStatsDataInspector(cso.getStatsData().getDateStats());
++ }
++ return dateColumnStats;
++ }
++
++ /**
++ * Convertes to StringColumnStatsDataInspector
++ * if it's a StringColumnStatsData.
++ * @param cso ColumnStatisticsObj
++ * @return StringColumnStatsDataInspector
++ */
++ public static StringColumnStatsDataInspector stringInspectorFromStats(ColumnStatisticsObj cso) {
++ StringColumnStatsDataInspector columnStats;
++ if (cso.getStatsData().getStringStats() instanceof StringColumnStatsDataInspector) {
++ columnStats =
++ (StringColumnStatsDataInspector)(cso.getStatsData().getStringStats());
++ } else {
++ columnStats = new StringColumnStatsDataInspector(cso.getStatsData().getStringStats());
++ }
++ return columnStats;
++ }
++
++ /**
++ * Convertes to LongColumnStatsDataInspector if it's a LongColumnStatsData.
++ * @param cso ColumnStatisticsObj
++ * @return LongColumnStatsDataInspector
++ */
++ public static LongColumnStatsDataInspector longInspectorFromStats(ColumnStatisticsObj cso) {
++ LongColumnStatsDataInspector columnStats;
++ if (cso.getStatsData().getLongStats() instanceof LongColumnStatsDataInspector) {
++ columnStats =
++ (LongColumnStatsDataInspector)(cso.getStatsData().getLongStats());
++ } else {
++ columnStats = new LongColumnStatsDataInspector(cso.getStatsData().getLongStats());
++ }
++ return columnStats;
++ }
++
++ /**
++ * Convertes to DoubleColumnStatsDataInspector
++ * if it's a DoubleColumnStatsData.
++ * @param cso ColumnStatisticsObj
++ * @return DoubleColumnStatsDataInspector
++ */
++ public static DoubleColumnStatsDataInspector doubleInspectorFromStats(ColumnStatisticsObj cso) {
++ DoubleColumnStatsDataInspector columnStats;
++ if (cso.getStatsData().getDoubleStats() instanceof DoubleColumnStatsDataInspector) {
++ columnStats =
++ (DoubleColumnStatsDataInspector)(cso.getStatsData().getDoubleStats());
++ } else {
++ columnStats = new DoubleColumnStatsDataInspector(cso.getStatsData().getDoubleStats());
++ }
++ return columnStats;
++ }
++
++ /**
++ * Convertes to DecimalColumnStatsDataInspector
++ * if it's a DecimalColumnStatsData.
++ * @param cso ColumnStatisticsObj
++ * @return DecimalColumnStatsDataInspector
++ */
++ public static DecimalColumnStatsDataInspector decimalInspectorFromStats(ColumnStatisticsObj cso) {
++ DecimalColumnStatsDataInspector columnStats;
++ if (cso.getStatsData().getDecimalStats() instanceof DecimalColumnStatsDataInspector) {
++ columnStats =
++ (DecimalColumnStatsDataInspector)(cso.getStatsData().getDecimalStats());
++ } else {
++ columnStats = new DecimalColumnStatsDataInspector(cso.getStatsData().getDecimalStats());
++ }
++ return columnStats;
++ }
++}
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java
+index e8ff513f50..9495424410 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java
+@@ -38,6 +38,8 @@
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+
++import static org.apache.hadoop.hive.metastore.columnstats.ColumnsStatsUtils.dateInspectorFromStats;
+
+ public class DateColumnStatsAggregator extends ColumnStatsAggregator implements
+ IExtrapolatePartStatus {
+
+@@ -62,8 +64,8 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ cso.getStatsData().getSetField());
+ LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats);
+ }
+- DateColumnStatsDataInspector dateColumnStats =
+- (DateColumnStatsDataInspector) cso.getStatsData().getDateStats();
++ DateColumnStatsDataInspector dateColumnStats = dateInspectorFromStats(cso);
++
+ if (dateColumnStats.getNdvEstimator() == null) {
+ ndvEstimator = null;
+ break;
+@@ -95,9 +97,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ double densityAvgSum = 0.0;
+ for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
+ ColumnStatisticsObj cso = csp.getColStatsObj();
+- DateColumnStatsDataInspector newData =
+- (DateColumnStatsDataInspector) cso.getStatsData().getDateStats();
+- lowerBound = Math.max(lowerBound, newData.getNumDVs());
++ DateColumnStatsDataInspector newData = dateInspectorFromStats(cso);
+ higherBound += newData.getNumDVs();
+ densityAvgSum += (diff(newData.getHighValue(), newData.getLowValue()))
+ / newData.getNumDVs();
+@@ -174,8 +174,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
+ ColumnStatisticsObj cso = csp.getColStatsObj();
+ String partName = csp.getPartName();
+- DateColumnStatsDataInspector newData =
+- (DateColumnStatsDataInspector) cso.getStatsData().getDateStats();
++ DateColumnStatsDataInspector newData = dateInspectorFromStats(cso);
+ // newData.isSetBitVectors() should be true for sure because we
+ // already checked it before.
+ if (indexMap.get(partName) != curIndex) {
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java
+index ac7e8e35f9..8739e73d00 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java
+@@ -40,6 +40,8 @@
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+
++import static org.apache.hadoop.hive.metastore.columnstats.ColumnsStatsUtils.decimalInspectorFromStats;
+
+ public class DecimalColumnStatsAggregator extends ColumnStatsAggregator implements
+ IExtrapolatePartStatus {
+
+@@ -65,8 +67,8 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
+ doAllPartitionContainStats);
+ }
+- DecimalColumnStatsDataInspector decimalColumnStatsData =
+- (DecimalColumnStatsDataInspector) cso.getStatsData().getDecimalStats();
++ DecimalColumnStatsDataInspector decimalColumnStatsData = decimalInspectorFromStats(cso);
++
+ if (decimalColumnStatsData.getNdvEstimator() == null) {
+ ndvEstimator = null;
+ break;
+@@ -98,8 +100,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ double densityAvgSum = 0.0;
+ for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
+ ColumnStatisticsObj cso = csp.getColStatsObj();
+- DecimalColumnStatsDataInspector newData =
+- (DecimalColumnStatsDataInspector) cso.getStatsData().getDecimalStats();
++ DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
+ lowerBound = Math.max(lowerBound, newData.getNumDVs());
+ higherBound += newData.getNumDVs();
+ densityAvgSum += (MetaStoreUtils.decimalToDouble(newData.getHighValue()) - MetaStoreUtils
+@@ -187,8 +188,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
+ ColumnStatisticsObj cso = csp.getColStatsObj();
+ String partName = csp.getPartName();
+- DecimalColumnStatsDataInspector newData =
+- (DecimalColumnStatsDataInspector) cso.getStatsData().getDecimalStats();
++ DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
+ // newData.isSetBitVectors() should be true for sure because we
+ // already checked it before.
+ if (indexMap.get(partName) != curIndex) {
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java
+index ece77dd51b..5ad84536f6 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java
+@@ -37,6 +37,8 @@
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+
++import static org.apache.hadoop.hive.metastore.columnstats.ColumnsStatsUtils.doubleInspectorFromStats;
+
+ public class DoubleColumnStatsAggregator extends ColumnStatsAggregator implements
+ IExtrapolatePartStatus {
+
+@@ -63,7 +65,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ doAllPartitionContainStats);
+ }
+ DoubleColumnStatsDataInspector doubleColumnStatsData =
+- (DoubleColumnStatsDataInspector) cso.getStatsData().getDoubleStats();
++ doubleInspectorFromStats(cso);
+ if (doubleColumnStatsData.getNdvEstimator() == null) {
+ ndvEstimator = null;
+ break;
+@@ -95,8 +97,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ double densityAvgSum = 0.0;
+ for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
+ ColumnStatisticsObj cso = csp.getColStatsObj();
+- DoubleColumnStatsDataInspector newData =
+- (DoubleColumnStatsDataInspector) cso.getStatsData().getDoubleStats();
++ DoubleColumnStatsDataInspector newData = doubleInspectorFromStats(cso);
+ lowerBound = Math.max(lowerBound, newData.getNumDVs());
+ higherBound += newData.getNumDVs();
+ densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
+@@ -173,7 +174,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ ColumnStatisticsObj cso = csp.getColStatsObj();
+ String partName = csp.getPartName();
+ DoubleColumnStatsDataInspector newData =
+- (DoubleColumnStatsDataInspector) cso.getStatsData().getDoubleStats();
++ doubleInspectorFromStats(cso);
+ // newData.isSetBitVectors() should be true for sure because we
+ // already checked it before.
+ if (indexMap.get(partName) != curIndex) {
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java
+index e6823d342a..ab3153933d 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java
+@@ -38,6 +38,8 @@
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+
++import static org.apache.hadoop.hive.metastore.columnstats.ColumnsStatsUtils.longInspectorFromStats;
+
+ public class LongColumnStatsAggregator extends ColumnStatsAggregator implements
+ IExtrapolatePartStatus {
+
+@@ -63,8 +65,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
+ doAllPartitionContainStats);
+ }
+- LongColumnStatsDataInspector longColumnStatsData =
+- (LongColumnStatsDataInspector) cso.getStatsData().getLongStats();
++ LongColumnStatsDataInspector longColumnStatsData = longInspectorFromStats(cso);
+ if (longColumnStatsData.getNdvEstimator() == null) {
+ ndvEstimator = null;
+ break;
+@@ -96,8 +97,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ double densityAvgSum = 0.0;
+ for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
+ ColumnStatisticsObj cso = csp.getColStatsObj();
+- LongColumnStatsDataInspector newData =
+- (LongColumnStatsDataInspector) cso.getStatsData().getLongStats();
++ LongColumnStatsDataInspector newData = longInspectorFromStats(cso);
+ lowerBound = Math.max(lowerBound, newData.getNumDVs());
+ higherBound += newData.getNumDVs();
+ densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
+@@ -174,8 +174,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
+ ColumnStatisticsObj cso = csp.getColStatsObj();
+ String partName = csp.getPartName();
+- LongColumnStatsDataInspector newData =
+- (LongColumnStatsDataInspector) cso.getStatsData().getLongStats();
++ LongColumnStatsDataInspector newData = longInspectorFromStats(cso);
+ // newData.isSetBitVectors() should be true for sure because we
+ // already checked it before.
+ if (indexMap.get(partName) != curIndex) {
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java
+index 9537647503..92fdda51a7 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java
+@@ -38,6 +38,8 @@
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+
++import static org.apache.hadoop.hive.metastore.columnstats.ColumnsStatsUtils.stringInspectorFromStats;
+
+ public class StringColumnStatsAggregator extends ColumnStatsAggregator implements
+ IExtrapolatePartStatus {
+
+@@ -63,8 +65,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
+ doAllPartitionContainStats);
+ }
+- StringColumnStatsDataInspector stringColumnStatsData =
+- (StringColumnStatsDataInspector) cso.getStatsData().getStringStats();
++ StringColumnStatsDataInspector stringColumnStatsData = stringInspectorFromStats(cso);
+ if (stringColumnStatsData.getNdvEstimator() == null) {
+ ndvEstimator = null;
+ break;
+@@ -93,8 +94,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ StringColumnStatsDataInspector aggregateData = null;
+ for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
+ ColumnStatisticsObj cso = csp.getColStatsObj();
+- StringColumnStatsDataInspector newData =
+- (StringColumnStatsDataInspector) cso.getStatsData().getStringStats();
++ StringColumnStatsDataInspector newData = stringInspectorFromStats(cso);
+ if (ndvEstimator != null) {
+ ndvEstimator.mergeEstimators(newData.getNdvEstimator());
+ }
+@@ -149,7 +149,7 @@ public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWit
+ ColumnStatisticsObj cso = csp.getColStatsObj();
+ String partName = csp.getPartName();
+ StringColumnStatsDataInspector newData =
+- (StringColumnStatsDataInspector) cso.getStatsData().getStringStats();
++ stringInspectorFromStats(cso);
+ // newData.isSetBitVectors() should be true for sure because we
+ // already checked it before.
+ if (indexMap.get(partName) != curIndex) {
+@@ -211,7 +211,8 @@ public void extrapolate(ColumnStatisticsData extrapolateData, int numParts,
+ int numPartsWithStats, Map<String, Double> adjustedIndexMap,
+ Map<String, ColumnStatisticsData> adjustedStatsMap, double densityAvg) {
+ int rightBorderInd = numParts;
+- StringColumnStatsDataInspector extrapolateStringData = new StringColumnStatsDataInspector();
++ StringColumnStatsDataInspector extrapolateStringData =
++ new StringColumnStatsDataInspector();
+ Map<String, StringColumnStatsData> extractedAdjustedStatsMap = new HashMap<>();
+ for (Map.Entry<String, ColumnStatisticsData> entry : adjustedStatsMap.entrySet()) {
+ extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getStringStats());
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java
+index f6eacbc928..d66e19aee7 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java
+@@ -43,6 +43,10 @@ public DateColumnStatsDataInspector(DateColumnStatsDataInspector other) {
+ }
+ }
+
++ public DateColumnStatsDataInspector(DateColumnStatsData other) {
++ super(other);
++ }
++
+ @Override
+ public DateColumnStatsDataInspector deepCopy() {
+ return new DateColumnStatsDataInspector(this);
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java
+index e2427f31b6..88cab2c1b2 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java
+@@ -43,6 +43,10 @@ public DecimalColumnStatsDataInspector(DecimalColumnStatsDataInspector other) {
+ }
+ }
+
++ public DecimalColumnStatsDataInspector(DecimalColumnStatsData other) {
++ super(other);
++ }
++
+ @Override
+ public DecimalColumnStatsDataInspector deepCopy() {
+ return new DecimalColumnStatsDataInspector(this);
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java
+index 7ce71271e5..2ee7fad9c1 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java
+@@ -43,6 +43,10 @@ public DoubleColumnStatsDataInspector(DoubleColumnStatsDataInspector other) {
+ }
+ }
+
++ public DoubleColumnStatsDataInspector(DoubleColumnStatsData other) {
++ super(other);
++ }
++
+ @Override
+ public DoubleColumnStatsDataInspector deepCopy() {
+ return new DoubleColumnStatsDataInspector(this);
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java
+index faf314b0fc..a4d0a1d326 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java
+@@ -43,6 +43,10 @@ public LongColumnStatsDataInspector(LongColumnStatsDataInspector other) {
+ }
+ }
+
++ public LongColumnStatsDataInspector(LongColumnStatsData other) {
++ super(other);
++ }
++
+ @Override
+ public LongColumnStatsDataInspector deepCopy() {
+ return new LongColumnStatsDataInspector(this);
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java
+index 087641028e..12afb9cd1d 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java
+@@ -44,6 +44,10 @@ public StringColumnStatsDataInspector(StringColumnStatsDataInspector other) {
+ }
+ }
+
++ public StringColumnStatsDataInspector(StringColumnStatsData other) {
++ super(other);
++ }
++
+ @Override
+ public StringColumnStatsDataInspector deepCopy() {
+ return new StringColumnStatsDataInspector(this);
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java
+index 5baebbb47b..3e0ce07f7c 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java
+@@ -24,13 +24,13 @@
+ import org.apache.hadoop.hive.metastore.api.Date;
+ import org.apache.hadoop.hive.metastore.columnstats.cache.DateColumnStatsDataInspector;
+
++import static org.apache.hadoop.hive.metastore.columnstats.ColumnsStatsUtils.dateInspectorFromStats;
++
+ public class DateColumnStatsMerger extends ColumnStatsMerger {
+ @Override
+ public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
+- DateColumnStatsDataInspector aggregateData =
+- (DateColumnStatsDataInspector) aggregateColStats.getStatsData().getDateStats();
+- DateColumnStatsDataInspector newData =
+- (DateColumnStatsDataInspector) newColStats.getStatsData().getDateStats();
++ DateColumnStatsDataInspector aggregateData = dateInspectorFromStats(aggregateColStats);
++ DateColumnStatsDataInspector newData = dateInspectorFromStats(newColStats);
+ Date lowValue = aggregateData.getLowValue().compareTo(newData.getLowValue()) < 0 ? aggregateData
+ .getLowValue() : newData.getLowValue();
+ aggregateData.setLowValue(lowValue);
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java
+index 517ca7259b..50943588e3 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java
+@@ -24,13 +24,15 @@
+ import org.apache.hadoop.hive.metastore.api.Decimal;
+ import org.apache.hadoop.hive.metastore.columnstats.cache.DecimalColumnStatsDataInspector;
+
++import static org.apache.hadoop.hive.metastore.columnstats.ColumnsStatsUtils.decimalInspectorFromStats;
++
+ public class DecimalColumnStatsMerger extends ColumnStatsMerger {
+ @Override
+ public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
+ DecimalColumnStatsDataInspector aggregateData =
+- (DecimalColumnStatsDataInspector) aggregateColStats.getStatsData().getDecimalStats();
++ decimalInspectorFromStats(aggregateColStats);
+ DecimalColumnStatsDataInspector newData =
+- (DecimalColumnStatsDataInspector) newColStats.getStatsData().getDecimalStats();
++ decimalInspectorFromStats(newColStats);
+
+ Decimal lowValue = getMin(aggregateData.getLowValue(), newData.getLowValue());
+ aggregateData.setLowValue(lowValue);
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java
+index 6a95751815..cbacacd626 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java
+@@ -23,13 +23,13 @@
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.columnstats.cache.DoubleColumnStatsDataInspector;
+
++import static org.apache.hadoop.hive.metastore.columnstats.ColumnsStatsUtils.doubleInspectorFromStats;
++
+ public class DoubleColumnStatsMerger extends ColumnStatsMerger {
+ @Override
+ public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
+- DoubleColumnStatsDataInspector aggregateData =
+- (DoubleColumnStatsDataInspector) aggregateColStats.getStatsData().getDoubleStats();
+- DoubleColumnStatsDataInspector newData =
+- (DoubleColumnStatsDataInspector) newColStats.getStatsData().getDoubleStats();
++ DoubleColumnStatsDataInspector aggregateData = doubleInspectorFromStats(aggregateColStats);
++ DoubleColumnStatsDataInspector newData = doubleInspectorFromStats(newColStats);
+ aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
+ aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue()));
+ aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java
+index ca1a912052..8e70371e69 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java
+@@ -23,13 +23,13 @@
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
+
++import static org.apache.hadoop.hive.metastore.columnstats.ColumnsStatsUtils.longInspectorFromStats;
++
+ public class LongColumnStatsMerger extends ColumnStatsMerger {
+ @Override
+ public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
+- LongColumnStatsDataInspector aggregateData =
+- (LongColumnStatsDataInspector) aggregateColStats.getStatsData().getLongStats();
+- LongColumnStatsDataInspector newData =
+- (LongColumnStatsDataInspector) newColStats.getStatsData().getLongStats();
++ LongColumnStatsDataInspector aggregateData = longInspectorFromStats(aggregateColStats);
++ LongColumnStatsDataInspector newData = longInspectorFromStats(newColStats);
+ aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
+ aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue()));
+ aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
+diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java
+index d6b4478ec8..762685d00d 100644
+--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java
++++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java
+@@ -23,13 +23,13 @@
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector;
+
++import static org.apache.hadoop.hive.metastore.columnstats.ColumnsStatsUtils.stringInspectorFromStats;
++
+ public class StringColumnStatsMerger extends ColumnStatsMerger {
+ @Override
+ public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
+- StringColumnStatsDataInspector aggregateData =
+- (StringColumnStatsDataInspector) aggregateColStats.getStatsData().getStringStats();
+- StringColumnStatsDataInspector newData =
+- (StringColumnStatsDataInspector) newColStats.getStatsData().getStringStats();
++ StringColumnStatsDataInspector aggregateData = stringInspectorFromStats(aggregateColStats);
++ StringColumnStatsDataInspector newData = stringInspectorFromStats(newColStats);
+ aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen()));
+ aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen()));
+ aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
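
Every hunk in the new patch0-HIVE-22779.diff above follows a single pattern: an unconditional cast of the Thrift stats union to its *ColumnStatsDataInspector subtype is replaced by a ColumnsStatsUtils helper, backed by the copy constructors added to each inspector class. A minimal sketch of what one such helper plausibly looks like, inferred from these hunks rather than quoted from the patch (the class name below is illustrative):

    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
    import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector;

    // Illustrative sketch; the real helper lives in
    // org.apache.hadoop.hive.metastore.columnstats.ColumnsStatsUtils.
    public final class InspectorHelperSketch {
      private InspectorHelperSketch() {}

      public static StringColumnStatsDataInspector stringInspectorFromStats(ColumnStatisticsObj cso) {
        StringColumnStatsData stats = cso.getStatsData().getStringStats();
        if (stats instanceof StringColumnStatsDataInspector) {
          // The old code assumed this branch unconditionally and cast directly.
          return (StringColumnStatsDataInspector) stats;
        }
        // Otherwise wrap the plain Thrift struct via the copy constructor the
        // patch adds, instead of failing with a ClassCastException.
        return new StringColumnStatsDataInspector(stats);
      }
    }
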
diff --git a/bigtop-packages/src/common/hive/patch5-HIVE-17368.diff b/bigtop-packages/src/common/hive/patch5-HIVE-17368.diff
deleted file mode 100644
index 00c907e..0000000
--- a/bigtop-packages/src/common/hive/patch5-HIVE-17368.diff
+++ /dev/null
@@ -1,293 +0,0 @@
-diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java
-index bbec37eea76..c02879d9bfd 100644
---- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java
-+++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java
-@@ -51,6 +51,7 @@
- public static String HIVE_TEST_USER_2 = "user2";
- public static String HIVE_TEST_SUPER_USER = "superuser";
- public static String AUTHENTICATION_TYPE = "KERBEROS";
-+ private static final String HIVE_METASTORE_SERVICE_PRINCIPAL = "hive";
-
- private final MiniKdc miniKdc;
- private final File workDir;
-@@ -204,6 +205,39 @@ public static MiniHS2 getMiniHS2WithKerbWithRemoteHMS(MiniHiveKdc miniHiveKdc, H
- return getMiniHS2WithKerbWithRemoteHMS(miniHiveKdc, hiveConf, AUTHENTICATION_TYPE);
- }
-
-+ public static MiniHS2 getMiniHS2WithKerbWithRemoteHMSWithKerb(MiniHiveKdc miniHiveKdc,
-+ HiveConf hiveConf) throws Exception {
-+ return getMiniHS2WithKerbWithRemoteHMSWithKerb(miniHiveKdc, hiveConf, AUTHENTICATION_TYPE);
-+ }
-+
-+ /**
-+ * Create a MiniHS2 with the hive service principal and keytab in MiniHiveKdc. It uses remote HMS
-+ * and can support a different Sasl authType. It creates a metastore service principal and keytab
-+ * which can be used for secure HMS
-+ * @param miniHiveKdc
-+ * @param hiveConf
-+ * @param authenticationType
-+ * @return new MiniHS2 instance
-+ * @throws Exception
-+ */
-+ private static MiniHS2 getMiniHS2WithKerbWithRemoteHMSWithKerb(MiniHiveKdc miniHiveKdc,
-+ HiveConf hiveConf, String authenticationType) throws Exception {
-+ String hivePrincipal =
-+ miniHiveKdc.getFullyQualifiedServicePrincipal(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL);
-+ String hiveKeytab = miniHiveKdc.getKeyTabFile(
-+ miniHiveKdc.getServicePrincipalForUser(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL));
-+
-+ String hiveMetastorePrincipal =
-+ miniHiveKdc.getFullyQualifiedServicePrincipal(MiniHiveKdc.HIVE_METASTORE_SERVICE_PRINCIPAL);
-+ String hiveMetastoreKeytab = miniHiveKdc.getKeyTabFile(
-+ miniHiveKdc.getServicePrincipalForUser(MiniHiveKdc.HIVE_METASTORE_SERVICE_PRINCIPAL));
-+
-+ return new MiniHS2.Builder().withConf(hiveConf)
-+ .withSecureRemoteMetastore(hiveMetastorePrincipal, hiveMetastoreKeytab).
-+ withMiniKdc(hivePrincipal, hiveKeytab).withAuthenticationType(authenticationType)
-+ .build();
-+ }
-+
- /**
- * Create a MiniHS2 with the hive service principal and keytab in MiniHiveKdc. It uses remote HMS
- * and can support a different Sasl authType
-diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java
-index d690aaa673a..fc1dc493d7f 100644
---- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java
-+++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java
-@@ -34,7 +34,11 @@ public static void beforeTest() throws Exception {
- HiveConf hiveConf = new HiveConf();
- hiveConf.setVar(ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS, "org.apache.hadoop.hive.thrift.DBTokenStore");
- miniHiveKdc = MiniHiveKdc.getMiniHiveKdc(hiveConf);
-- miniHS2 = MiniHiveKdc.getMiniHS2WithKerbWithRemoteHMS(miniHiveKdc, hiveConf);
-+ miniHS2 = MiniHiveKdc.getMiniHS2WithKerbWithRemoteHMSWithKerb(miniHiveKdc, hiveConf);
- miniHS2.start(confOverlay);
-+ String metastorePrincipal = miniHS2.getConfProperty(ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname);
-+ String hs2Principal = miniHS2.getConfProperty(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL.varname);
-+ String hs2KeyTab = miniHS2.getConfProperty(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB.varname);
-+ System.out.println("HS2 principal : " + hs2Principal + " HS2 keytab : " + hs2KeyTab + " Metastore principal : " + metastorePrincipal);
- }
- }
-\ No newline at end of file
-diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoopAuthBridge23.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoopAuthBridge23.java
-index 36a9ea830a6..7e5005cdcfd 100644
---- a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoopAuthBridge23.java
-+++ b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoopAuthBridge23.java
-@@ -169,7 +169,7 @@ public void testDelegationTokenSharedStore() throws Exception {
- tokenManager.startThreads();
- tokenManager.stopThreads();
-
-- String tokenStrForm = tokenManager.getDelegationToken(clientUgi.getShortUserName());
-+ String tokenStrForm = tokenManager.getDelegationToken(clientUgi.getShortUserName(), clientUgi.getShortUserName());
- Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
- t.decodeFromUrlString(tokenStrForm);
-
-diff --git a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
-index 71f9640ad21..ebc4c10b640 100644
---- a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
-+++ b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
-@@ -72,6 +72,7 @@
- private final String serverPrincipal;
- private final boolean isMetastoreRemote;
- private final boolean cleanupLocalDirOnStartup;
-+ private final boolean isMetastoreSecure;
- private MiniClusterType miniClusterType = MiniClusterType.LOCALFS_ONLY;
-
- public enum MiniClusterType {
-@@ -93,6 +94,9 @@
- private String authType = "KERBEROS";
- private boolean isHA = false;
- private boolean cleanupLocalDirOnStartup = true;
-+ private boolean isMetastoreSecure;
-+ private String metastoreServerPrincipal;
-+ private String metastoreServerKeyTab;
-
- public Builder() {
- }
-@@ -119,6 +123,14 @@ public Builder withRemoteMetastore() {
- return this;
- }
-
-+ public Builder withSecureRemoteMetastore(String metastoreServerPrincipal, String metastoreServerKeyTab) {
-+ this.isMetastoreRemote = true;
-+ this.isMetastoreSecure = true;
-+ this.metastoreServerPrincipal = metastoreServerPrincipal;
-+ this.metastoreServerKeyTab = metastoreServerKeyTab;
-+ return this;
-+ }
-+
- public Builder withConf(HiveConf hiveConf) {
- this.hiveConf = hiveConf;
- return this;
-@@ -153,7 +165,8 @@ public MiniHS2 build() throws Exception {
- hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, HS2_BINARY_MODE);
- }
- return new MiniHS2(hiveConf, miniClusterType, useMiniKdc, serverPrincipal, serverKeytab,
-- isMetastoreRemote, usePortsFromConf, authType, isHA, cleanupLocalDirOnStartup);
-+ isMetastoreRemote, usePortsFromConf, authType, isHA, cleanupLocalDirOnStartup,
-+ isMetastoreSecure, metastoreServerPrincipal, metastoreServerKeyTab);
- }
- }
-
-@@ -191,7 +204,10 @@ public boolean isUseMiniKdc() {
-
- private MiniHS2(HiveConf hiveConf, MiniClusterType miniClusterType, boolean useMiniKdc,
- String serverPrincipal, String serverKeytab, boolean isMetastoreRemote,
-- boolean usePortsFromConf, String authType, boolean isHA, boolean cleanupLocalDirOnStartup) throws Exception {
-+ boolean usePortsFromConf, String authType, boolean isHA, boolean cleanupLocalDirOnStartup,
-+ boolean isMetastoreSecure,
-+ String metastoreServerPrincipal,
-+ String metastoreKeyTab) throws Exception {
- // Always use localhost for hostname as some tests like SSL CN validation ones
- // are tied to localhost being present in the certificate name
- super(
-@@ -208,6 +224,7 @@ private MiniHS2(HiveConf hiveConf, MiniClusterType miniClusterType, boolean useM
- this.useMiniKdc = useMiniKdc;
- this.serverPrincipal = serverPrincipal;
- this.isMetastoreRemote = isMetastoreRemote;
-+ this.isMetastoreSecure = isMetastoreSecure;
- this.cleanupLocalDirOnStartup = cleanupLocalDirOnStartup;
- baseDir = getBaseDir();
- localFS = FileSystem.getLocal(hiveConf);
-@@ -261,9 +278,15 @@ private MiniHS2(HiveConf hiveConf, MiniClusterType miniClusterType, boolean useM
- hiveConf.setVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB, serverKeytab);
- hiveConf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, authType);
- }
-- String metaStoreURL =
-- "jdbc:derby:;databaseName=" + baseDir.getAbsolutePath() +
File.separator
-- + "test_metastore;create=true";
-+
-+ String metaStoreURL = "jdbc:derby:;databaseName=" +
baseDir.getAbsolutePath() + File.separator
-+ + "test_metastore;create=true";
-+
-+ if (isMetastoreSecure) {
-+ hiveConf.setVar(ConfVars.METASTORE_KERBEROS_PRINCIPAL, metastoreServerPrincipal);
-+ hiveConf.setVar(ConfVars.METASTORE_KERBEROS_KEYTAB_FILE, metastoreKeyTab);
-+ hiveConf.setBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL, true);
-+ }
-
- fs.mkdirs(baseFsDir);
- Path wareHouseDir = new Path(baseFsDir, "warehouse");
-@@ -301,10 +324,11 @@ public MiniHS2(HiveConf hiveConf, MiniClusterType clusterType) throws Exception
- this(hiveConf, clusterType, false);
- }
-
-- public MiniHS2(HiveConf hiveConf, MiniClusterType clusterType,
-- boolean usePortsFromConf) throws Exception {
-- this(hiveConf, clusterType, false, null, null, false, usePortsFromConf,
-- "KERBEROS", false, true);
-+ public MiniHS2(HiveConf hiveConf, MiniClusterType clusterType, boolean usePortsFromConf)
-+ throws Exception {
-+ this(hiveConf, clusterType, false, null, null,
-+ false, usePortsFromConf, "KERBEROS", false, true,
-+ false, null, null);
- }
-
- public void start(Map<String, String> confOverlay) throws Exception {
-diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
-index ffce1d1aec8..6705a98bb2a 100644
---- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
-+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
-@@ -54,6 +54,7 @@
- import org.apache.hadoop.hive.common.log.ProgressMonitor;
- import org.apache.hadoop.hive.conf.HiveConf;
- import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-+import org.apache.hadoop.hive.conf.HiveConfUtil;
- import org.apache.hadoop.hive.metastore.ObjectStore;
- import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
- import org.apache.hadoop.hive.ql.MapRedStats;
-@@ -1589,10 +1590,11 @@ public void close() throws IOException {
-
- private void unCacheDataNucleusClassLoaders() {
- try {
-- Hive threadLocalHive = Hive.get(sessionConf);
-- if ((threadLocalHive != null) && (threadLocalHive.getMSC() != null)
-- && (threadLocalHive.getMSC().isLocalMetaStore())) {
-- if (sessionConf.getVar(ConfVars.METASTORE_RAW_STORE_IMPL).equals(ObjectStore.class.getName())) {
-+ boolean isLocalMetastore =
-+ HiveConfUtil.isEmbeddedMetaStore(sessionConf.getVar(HiveConf.ConfVars.METASTOREURIS));
-+ if (isLocalMetastore) {
-+ if (sessionConf.getVar(ConfVars.METASTORE_RAW_STORE_IMPL)
-+ .equals(ObjectStore.class.getName())) {
- ObjectStore.unCacheDataNucleusClassLoaders();
- }
- }
-diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java
-index 00a7e742cab..444d741cd86 100644
---- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java
-+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java
-@@ -125,6 +125,8 @@ private void cancelDelegationToken() throws HiveSQLException {
- if (hmsDelegationTokenStr != null) {
- try {
- Hive.get(getHiveConf()).cancelDelegationToken(hmsDelegationTokenStr);
-+ hmsDelegationTokenStr = null;
-+ getHiveConf().setVar(HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE, "");
- } catch (HiveException e) {
- throw new HiveSQLException("Couldn't cancel delegation token", e);
- }
-diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DBTokenStore.java b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DBTokenStore.java
-index d6dc0796e77..326ef4e99ca 100644
---- a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DBTokenStore.java
-+++ b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DBTokenStore.java
-@@ -152,8 +152,8 @@ private Object invokeOnTokenStore(String methName, Object[] params, Class<?> ...
- break;
- case HIVESERVER2 :
- Object hiveObject = ((Class<?>)handler)
-- .getMethod("get", org.apache.hadoop.conf.Configuration.class,
java.lang.Class.class)
-- .invoke(handler, conf, DBTokenStore.class);
-+ .getMethod("get")
-+ .invoke(handler, null);
- tokenStore = ((Class<?>)handler).getMethod("getMSC").invoke(hiveObject);
- break;
- default:
-diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java
-index 5299e18743a..6a863d8a53d 100644
---- a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java
-+++ b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DelegationTokenSecretManager.java
-@@ -97,12 +97,15 @@ public synchronized long renewDelegationToken(String tokenStrForm) throws IOExce
- return renewToken(t, user);
- }
-
-- public synchronized String getDelegationToken(String renewer) throws IOException {
-- UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-- Text owner = new Text(ugi.getUserName());
-+ public synchronized String getDelegationToken(final String ownerStr, final String renewer) throws IOException {
-+ if(ownerStr == null) {
-+ throw new RuntimeException("Delegation token owner is null");
-+ }
-+ Text owner = new Text(ownerStr);
- Text realUser = null;
-- if (ugi.getRealUser() != null) {
-- realUser = new Text(ugi.getRealUser().getUserName());
-+ UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
-+ if (currentUgi.getUserName() != null) {
-+ realUser = new Text(currentUgi.getUserName());
- }
- DelegationTokenIdentifier ident =
- new DelegationTokenIdentifier(owner, new Text(renewer), realUser);
-diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HiveDelegationTokenManager.java b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HiveDelegationTokenManager.java
-index b3e4a760828..db5ec69bdd0 100644
---- a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HiveDelegationTokenManager.java
-+++ b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HiveDelegationTokenManager.java
-@@ -116,13 +116,14 @@ public String getDelegationToken(final String owner, final String renewer, Strin
- ownerUgi = UserGroupInformation.createProxyUser(owner, UserGroupInformation.getCurrentUser());
- ProxyUsers.authorize(ownerUgi, remoteAddr, null);
- }
-- return ownerUgi.doAs(new PrivilegedExceptionAction<String>() {
--
-- @Override
-- public String run() throws IOException {
-- return secretManager.getDelegationToken(renewer);
-- }
-- });
-+ //if impersonation is turned on this called using the HiveSessionImplWithUGI
-+ //using sessionProxy. so the currentUser will be the impersonated user here eg. oozie
-+ //we cannot create a proxy user which represents Oozie's client user here since
-+ //we cannot authenticate it using Kerberos/Digest. We trust the user which opened
-+ //session using Kerberos in this case.
-+ //if impersonation is turned off, the current user is Hive which can open
-+ //kerberos connections to HMS if required.
-+ return secretManager.getDelegationToken(owner, renewer);
- }
-
- public String getDelegationTokenWithService(String owner, String renewer, String service, String remoteAddr)
-
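
The deleted patch5-HIVE-17368.diff above is dropped, presumably because its fix already ships in the Hive 3.1 line. Its central API change made the delegation token owner an explicit argument instead of deriving it from the current UGI. A short sketch of the calling convention before and after, using only the shim class visible in the deleted hunks (the wrapper class below is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.hive.thrift.DelegationTokenSecretManager;

    // Hypothetical caller illustrating the signature change shown above.
    final class TokenFetchSketch {
      static String fetch(DelegationTokenSecretManager secretManager, String owner, String renewer)
          throws IOException {
        // Before: the owner was implicitly UserGroupInformation.getCurrentUser(),
        // so impersonating callers had to wrap this call in a doAs() block:
        //   secretManager.getDelegationToken(renewer);
        // After: the owner is explicit and no doAs() dance is needed:
        return secretManager.getDelegationToken(owner, renewer);
      }
    }
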
diff --git a/bigtop-packages/src/deb/hive/rules b/bigtop-packages/src/deb/hive/rules
index 6ef9b85..84df579 100755
--- a/bigtop-packages/src/deb/hive/rules
+++ b/bigtop-packages/src/deb/hive/rules
@@ -56,6 +56,3 @@ override_dh_auto_install: server2 metastore hcatalog-server webhcat-server
# Workaround for BIGTOP-583
rm -f debian/tmp/usr/lib/hive/lib/slf4j-log4j12-*.jar
bash debian/build-hive-install-file.sh >> debian/hive.install
-
- # Hive source contains a directory docs/changes that is interpreted a special way in Debian 7+ packaging
- mv docs/changes docs/changes_
diff --git a/bigtop.bom b/bigtop.bom
index 00bb650..cb12bc0 100644
--- a/bigtop.bom
+++ b/bigtop.bom
@@ -93,7 +93,7 @@ bigtop {
version = "1.6.0-SNAPSHOT"
stack {
'jdk' { version = "1." + ( System.getenv('BIGTOP_JDK') ?: "8" ); version_base = version }
- 'scala' { version = '2.11.8'; version_base = version }
+ 'scala' { version = '2.12.13'; version_base = version }
}
apache {
APACHE_MIRROR = "https://apache.osuosl.org"
@@ -180,7 +180,7 @@ bigtop {
'hive' {
name = 'hive'
relNotes = 'Apache Hive'
- version { base = '2.3.6'; pkg = base; release = 1 }
+ version { base = '3.1.2'; pkg = base; release = 1 }
tarball { destination = "apache-${name}-${version.base}-src.tar.gz"
source = destination }
url { download_path = "/$name/$name-${version.base}/"