This is an automated email from the ASF dual-hosted git repository.
abukor pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git
The following commit(s) were added to refs/heads/master by this push:
new 10e3cec [java] Upgrade dependencies
10e3cec is described below
commit 10e3cec12716a80d9e4afda397acfcb7ce498df0
Author: Attila Bukor <[email protected]>
AuthorDate: Wed Jan 19 08:42:55 2022 +0100
[java] Upgrade dependencies
Upgrades the Java dependencies and Gradle versions.
Major version upgrades:
- mockito - 3.8.0 -> 4.2.0
Minor version upgrades:
- commons - 2.8.0 -> 2.11.0
- jmh - 1.28 -> 1.34
- micrometer - 1.6.5 -> 1.8.2
- protobuf - 3.15.6 -> 3.19.3
Maintenance version upgrades:
- clojure - 1.10.1 -> 1.10.3
- clojure tools cli - 1.0.194 -> 1.0.206
- hadoop - 3.3.0 -> 3.3.1
- jetty - 9.4.38.v20210224 -> 9.4.44.v20210927
- netty - 4.1.65.Final -> 4.1.73.Final
- scala - 2.12.13 -> 2.12.15
- scalatest - 3.2.6 -> 3.2.10
- slf4j - 1.7.30 -> 1.7.33
- spark - 3.1.1 -> 3.1.2
- spark2 - 2.4.7 -> 2.4.8
Gradle upgrades:
- gradle - 6.8.3 -> 6.9.2
- gradle-versions-plugin - 0.38.0 -> 0.41.0
- osdetector-gradle-plugin - 1.6.2 -> 1.7.0
- protobuf-gradle-plugin - 0.8.15 -> 0.8.18
- nebula-clojure-plugin - 9.4.3 -> 10.1.1
- propdeps-plugin - 0.0.9.RELEASE -> 0.0.10.RELEASE
- gradle-errorprone-plugin - 1.3.0 -> 2.0.2
- gradle-animalsniffer-plugin - 1.5.3 -> 1.5.4
- gson - 2.8.6 -> 2.8.9
- guava - 30.1-jre -> 31.0.1-jre
Change-Id: I300af97556a8c12a2e7c0deb2ae723c2ea639518
Reviewed-on: http://gerrit.cloudera.org:8080/18159
Tested-by: Attila Bukor <[email protected]>
Reviewed-by: Alexey Serbin <[email protected]>
---
build-support/verify_jars.pl | 4 ++-
java/buildSrc/build.gradle | 18 ++++++------
java/gradle/dependencies.gradle | 32 +++++++++++-----------
java/gradlew | 2 +-
.../scala/org/apache/kudu/backup/KuduBackup.scala | 8 +++---
.../scala/org/apache/kudu/backup/KuduRestore.scala | 6 ++--
.../main/java/org/apache/kudu/client/RpcProxy.java | 3 +-
.../apache/kudu/client/TestAsyncKuduClient.java | 7 +++--
.../org/apache/kudu/spark/kudu/DefaultSource.scala | 1 -
.../scala/org/apache/kudu/spark/kudu/package.scala | 2 +-
10 files changed, 42 insertions(+), 41 deletions(-)
diff --git a/build-support/verify_jars.pl b/build-support/verify_jars.pl
index 4bcef20..64186d2 100755
--- a/build-support/verify_jars.pl
+++ b/build-support/verify_jars.pl
@@ -31,7 +31,9 @@ my $pat_allow_non_java =
qr{(?:\.(?:txt|xml|properties|json|proto|MF|jnilib|so|swp)|
LICENSE|NOTICE|DEPENDENCIES|
# The kudu-spark DataSourceRegister file.
- DataSourceRegister)$}x;
+ DataSourceRegister|
+ # Netty's block hound integration file.
+ reactor\.blockhound\.integration\.BlockHoundIntegration)$}x;
# Allowed filenames of shaded dependencies in JARs.
my $pat_allow_kudu_shaded =
diff --git a/java/buildSrc/build.gradle b/java/buildSrc/build.gradle
index 50f1e37..987df45 100644
--- a/java/buildSrc/build.gradle
+++ b/java/buildSrc/build.gradle
@@ -28,19 +28,19 @@ repositories {
// Manage plugin dependencies since the plugin block can't be used in included
build scripts yet.
// For more details see:
https://docs.gradle.org/current/userguide/plugins.html#plugins_dsl_limitations
dependencies {
- compile "com.github.ben-manes:gradle-versions-plugin:0.38.0"
+ compile "com.github.ben-manes:gradle-versions-plugin:0.41.0"
compile "com.github.jengelman.gradle.plugins:shadow:6.1.0"
compile "gradle.plugin.org.barfuin.gradle.jacocolog:gradle-jacoco-log:1.2.4"
- compile "gradle.plugin.com.google.gradle:osdetector-gradle-plugin:1.6.2"
- compile "com.google.protobuf:protobuf-gradle-plugin:0.8.15"
- compile "com.netflix.nebula:nebula-clojure-plugin:9.4.3"
+ compile "gradle.plugin.com.google.gradle:osdetector-gradle-plugin:1.7.0"
+ compile "com.google.protobuf:protobuf-gradle-plugin:0.8.18"
+ compile "com.netflix.nebula:nebula-clojure-plugin:10.1.1"
compile "gradle.plugin.com.github.spotbugs.snom:spotbugs-gradle-plugin:4.7.0"
- compile "io.spring.gradle:propdeps-plugin:0.0.9.RELEASE"
- compile "net.ltgt.gradle:gradle-errorprone-plugin:1.3.0"
- compile "ru.vyarus:gradle-animalsniffer-plugin:1.5.3"
- compile "com.google.code.gson:gson:2.8.6"
+ compile "io.spring.gradle:propdeps-plugin:0.0.10.RELEASE"
+ compile "net.ltgt.gradle:gradle-errorprone-plugin:2.0.2"
+ compile "ru.vyarus:gradle-animalsniffer-plugin:1.5.4"
+ compile "com.google.code.gson:gson:2.8.9"
compile "cz.alenkacz:gradle-scalafmt:1.14.0"
- compile "com.google.guava:guava:30.1-jre"
+ compile "com.google.guava:guava:31.0.1-jre"
compile "me.champeau.gradle:jmh-gradle-plugin:0.5.3"
}
diff --git a/java/gradle/dependencies.gradle b/java/gradle/dependencies.gradle
index fe41d63..9485b63 100755
--- a/java/gradle/dependencies.gradle
+++ b/java/gradle/dependencies.gradle
@@ -27,39 +27,39 @@ ext {
versions += [
async : "1.4.1",
checkstyle : "8.36.1",
- clojure : "1.10.1",
- clojureToolsCli: "1.0.194",
- commonsIo : "2.8.0",
+ clojure : "1.10.3",
+ clojureToolsCli: "1.0.206",
+ commonsIo : "2.11.0",
errorProne : "2.3.3",
errorProneJavac: "9+181-r4173-1",
- gradle : "6.8.3",
+ gradle : "6.9.2",
guava : "30.1-jre",
- hadoop : "3.3.0",
+ hadoop : "3.3.1",
hamcrest : "2.2",
hdrhistogram : "2.1.12",
hive : "3.1.2",
httpClient : "4.5.13",
jacoco : "0.8.6",
jepsen : "0.1.5",
- jetty : "9.4.38.v20210224",
- jmh : "1.28",
+ jetty : "9.4.44.v20210927",
+ jmh : "1.34",
jsr305 : "3.0.2",
junit : "4.13.2",
log4j : "2.17.1",
- micrometer : "1.6.5",
- mockito : "3.8.0",
+ micrometer : "1.8.2",
+ mockito : "4.2.0",
murmur : "1.0.0",
- netty : "4.1.65.Final",
+ netty : "4.1.73.Final",
osdetector : "1.6.2",
- protobuf : "3.15.6",
+ protobuf : "3.19.3",
ranger : "2.1.0",
scala211 : "2.11.12",
- scala : "2.12.13",
- scalatest : "3.2.6",
+ scala : "2.12.15",
+ scalatest : "3.2.10",
scopt : "4.0.1",
- slf4j : "1.7.30",
- spark2 : "2.4.7",
- spark : "3.1.1",
+ slf4j : "1.7.33",
+ spark2 : "2.4.8",
+ spark : "3.1.2",
spotBugs : "4.1.1",
yetus : "0.13.0"
]
diff --git a/java/gradlew b/java/gradlew
index 1f64d9c..6e24fce 100755
--- a/java/gradlew
+++ b/java/gradlew
@@ -84,7 +84,7 @@ esac
# Loop in case we encounter an error.
for attempt in 1 2 3; do
if [ ! -e $APP_HOME/gradle/wrapper/gradle-wrapper.jar ]; then
- if ! curl -s -S --retry 3 -L -o
"$APP_HOME/gradle/wrapper/gradle-wrapper.jar"
"https://raw.githubusercontent.com/gradle/gradle/v6.2.2/gradle/wrapper/gradle-wrapper.jar";
then
+ if ! curl -s -S --retry 3 -L -o
"$APP_HOME/gradle/wrapper/gradle-wrapper.jar"
"https://raw.githubusercontent.com/gradle/gradle/v6.8.3/gradle/wrapper/gradle-wrapper.jar";
then
rm -f "$APP_HOME/gradle/wrapper/gradle-wrapper.jar"
# Pause for a bit before looping in case the server throttled us.
sleep 5
diff --git
a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduBackup.scala
b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduBackup.scala
index 13dcc5f..e4bcfb8 100644
--- a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduBackup.scala
+++ b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduBackup.scala
@@ -24,9 +24,10 @@ import org.apache.yetus.audience.InterfaceAudience
import org.apache.yetus.audience.InterfaceStability
import org.slf4j.Logger
import org.slf4j.LoggerFactory
+
import scala.collection.JavaConverters._
import scala.collection.parallel.ForkJoinTaskSupport
-import scala.concurrent.forkjoin.ForkJoinPool
+import scala.concurrent.forkjoin.ForkJoinPool;
import scala.util.Failure
import scala.util.Success
import scala.util.Try
@@ -139,8 +140,8 @@ object KuduBackup {
// Parallelize the processing. Managing resources of parallel backup jobs
is very complex, so
// only the simplest possible thing is attempted. Kudu trusts Spark to
manage resources.
val parallelTables = options.tables.par
- val pool = new ForkJoinPool(options.numParallelBackups) // Need a clean-up
reference.
- parallelTables.tasksupport = new ForkJoinTaskSupport(pool)
+ parallelTables.tasksupport = new ForkJoinTaskSupport(
+ new ForkJoinPool(options.numParallelBackups))
val backupResults = parallelTables.map { tableName =>
val backupResult = Try(doBackup(tableName, context, session, io,
options, backupMap))
backupResult match {
@@ -154,7 +155,6 @@ object KuduBackup {
}
(tableName, backupResult)
}
- pool.shutdown()
backupResults.filter(_._2.isFailure).foreach {
case (tableName, ex) =>
diff --git
a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduRestore.scala
b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduRestore.scala
index 92d0717..ced54fb 100644
--- a/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduRestore.scala
+++ b/java/kudu-backup/src/main/scala/org/apache/kudu/backup/KuduRestore.scala
@@ -30,6 +30,7 @@ import org.apache.yetus.audience.InterfaceAudience
import org.apache.yetus.audience.InterfaceStability
import org.slf4j.Logger
import org.slf4j.LoggerFactory
+
import scala.collection.JavaConverters._
import scala.collection.parallel.ForkJoinTaskSupport
import scala.concurrent.forkjoin.ForkJoinPool
@@ -224,8 +225,8 @@ object KuduRestore {
// will cause subsequent restores to fail unless the table is deleted or
the restore suffix is
// changed. We ought to try to clean up the mess when a failure happens.
val parallelTables = options.tables.par
- val pool = new ForkJoinPool(options.numParallelRestores) // Need a
clean-up reference.
- parallelTables.tasksupport = new ForkJoinTaskSupport(pool)
+ parallelTables.tasksupport = new ForkJoinTaskSupport(
+ new ForkJoinPool(options.numParallelRestores))
val restoreResults = parallelTables.map { tableName =>
val restoreResult =
Try(doRestore(tableName, context, session, io, options, backupMap))
@@ -240,7 +241,6 @@ object KuduRestore {
}
(tableName, restoreResult)
}
- pool.shutdown()
restoreResults.filter(_._2.isFailure).foreach {
case (tableName, ex) =>
diff --git
a/java/kudu-client/src/main/java/org/apache/kudu/client/RpcProxy.java
b/java/kudu-client/src/main/java/org/apache/kudu/client/RpcProxy.java
index 6c4350f..6d98e4a 100644
--- a/java/kudu-client/src/main/java/org/apache/kudu/client/RpcProxy.java
+++ b/java/kudu-client/src/main/java/org/apache/kudu/client/RpcProxy.java
@@ -295,7 +295,7 @@ class RpcProxy {
} else if (decoded.getSecond() instanceof TxnManager.TxnManagerErrorPB) {
TxnManager.TxnManagerErrorPB error =
(TxnManager.TxnManagerErrorPB) decoded.getSecond();
- exception = dispatchTxnManagerError(client, connection, rpc, error,
traceBuilder);
+ exception = dispatchTxnManagerError(client, rpc, error, traceBuilder);
if (exception == null) {
// Exception was taken care of.
return;
@@ -430,7 +430,6 @@ class RpcProxy {
*/
private static KuduException dispatchTxnManagerError(
AsyncKuduClient client,
- Connection connection,
KuduRpc<?> rpc,
TxnManager.TxnManagerErrorPB pbError,
RpcTraceFrame.RpcTraceFrameBuilder tracer) {
diff --git
a/java/kudu-client/src/test/java/org/apache/kudu/client/TestAsyncKuduClient.java
b/java/kudu-client/src/test/java/org/apache/kudu/client/TestAsyncKuduClient.java
index 1a6baba..be613a4 100644
---
a/java/kudu-client/src/test/java/org/apache/kudu/client/TestAsyncKuduClient.java
+++
b/java/kudu-client/src/test/java/org/apache/kudu/client/TestAsyncKuduClient.java
@@ -32,6 +32,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
+import com.google.common.base.Splitter;
import com.google.common.base.Stopwatch;
import com.google.protobuf.ByteString;
import com.stumbleupon.async.Deferred;
@@ -212,14 +213,14 @@ public class TestAsyncKuduClient {
tabletPb.addInternedReplicas(ProtobufUtils.getFakeTabletInternedReplicaPB(
i, Metadata.RaftPeerPB.Role.FOLLOWER));
tabletLocations.add(tabletPb.build());
- String[] hostPort = tservers.get(i).toString().split(":");
- String tserverHost = hostPort[0];
+ List<String> hostPort =
Splitter.on(':').splitToList(tservers.get(i).toString());
+ String tserverHost = hostPort.get(0);
if (i == tserverIdx) {
// simulate IP resolve failure by hacking the hostname
tserverHost = tserverHost + "xxx";
}
tsInfos.add(ProtobufUtils.getFakeTSInfoPB("tserver",
- tserverHost, Integer.parseInt(hostPort[1])).build());
+ tserverHost, Integer.parseInt(hostPort.get(1))).build());
}
try {
asyncClient.discoverTablets(table, new byte[0], 100,
diff --git
a/java/kudu-spark/src/main/scala/org/apache/kudu/spark/kudu/DefaultSource.scala
b/java/kudu-spark/src/main/scala/org/apache/kudu/spark/kudu/DefaultSource.scala
index f621d7d..6d9c0c9 100644
---
a/java/kudu-spark/src/main/scala/org/apache/kudu/spark/kudu/DefaultSource.scala
+++
b/java/kudu-spark/src/main/scala/org/apache/kudu/spark/kudu/DefaultSource.scala
@@ -22,7 +22,6 @@ import org.apache.kudu.client.AsyncKuduClient.EncryptionPolicy
import java.net.InetAddress
import java.util.Locale
import scala.collection.JavaConverters._
-import scala.util.Try
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
diff --git
a/java/kudu-spark/src/main/scala/org/apache/kudu/spark/kudu/package.scala
b/java/kudu-spark/src/main/scala/org/apache/kudu/spark/kudu/package.scala
index 5b4546b..4f9e7b3 100755
--- a/java/kudu-spark/src/main/scala/org/apache/kudu/spark/kudu/package.scala
+++ b/java/kudu-spark/src/main/scala/org/apache/kudu/spark/kudu/package.scala
@@ -40,6 +40,6 @@ package object kudu {
implicit class KuduDataFrameWriter[T](writer: DataFrameWriter[T]) {
@deprecated("Use `.format(\"kudu\").save` instead", "1.9.0")
- def kudu = writer.format("org.apache.kudu.spark.kudu").save
+ def kudu() = writer.format("org.apache.kudu.spark.kudu").save
}
}