This is an automated email from the ASF dual-hosted git repository.
cshannon pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git
The following commit(s) were added to refs/heads/main by this push:
new 65034b65c5 Replace all instances of Guava cache with Caffeine (#3206)
65034b65c5 is described below
commit 65034b65c5f065cc3419e4df0e03a55d3b5d94fd
Author: Christopher L. Shannon <[email protected]>
AuthorDate: Fri Mar 3 08:14:16 2023 -0500
Replace all instances of Guava cache with Caffeine (#3206)
Caffeine is the replacement for the Guava cache and has several improvements. This
commit replaces all instances of the Guava cache with Caffeine for consistency and
also adds a checkstyle rule to prevent use of the Guava cache in the future.
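For readers following the diff, the gist of the API change is sketched below. The cache classes are the real Guava and Caffeine types; the surrounding example class, its String key/value types, and the compute() helper are hypothetical and only illustrate the pattern, not code from this commit:

    import com.github.benmanes.caffeine.cache.Cache;
    import com.github.benmanes.caffeine.cache.Caffeine;

    public class CaffeineMigrationSketch {
      // Guava (before):
      //   Cache<String,String> cache = CacheBuilder.newBuilder().weakValues().build();
      //   cache.get(key, () -> compute(key));  // Callable loader, throws checked ExecutionException
      //
      // Caffeine (after): the builder API is nearly identical, but get() takes a mapping
      // function of the key and throws no checked exception, which is why the
      // try/catch (ExecutionException e) blocks disappear throughout this diff.
      private final Cache<String,String> cache = Caffeine.newBuilder().weakValues().build();

      public String lookup(String key) {
        return cache.get(key, k -> compute(k));
      }

      private String compute(String key) {
        return key.toUpperCase(); // stand-in for real work
      }
    }

Note that failures inside the Caffeine mapping function propagate as unchecked exceptions, which is why the CachableBlockFile and RecoveryManager hunks below wrap IOException in UncheckedIOException.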
---
.../classloader/URLContextClassLoaderFactory.java | 32 +++++++++-------------
.../accumulo/core/clientImpl/bulk/BulkImport.java | 6 ++--
.../org/apache/accumulo/core/data/InstanceId.java | 14 +++-------
.../org/apache/accumulo/core/data/NamespaceId.java | 15 +++-------
.../org/apache/accumulo/core/data/TableId.java | 15 +++-------
.../apache/accumulo/core/file/FileOperations.java | 2 +-
.../file/blockfile/impl/CachableBlockFile.java | 13 ++++++---
.../file/rfile/bcfile/CompressionAlgorithm.java | 20 ++++----------
.../apache/accumulo/core/iterators/Combiner.java | 29 ++++++++------------
.../spi/balancer/HostRegexTableLoadBalancer.java | 16 ++++-------
.../core/spi/fs/SpaceAwareVolumeChooser.java | 21 ++++----------
.../org/apache/accumulo/core/summary/Gatherer.java | 2 +-
.../accumulo/core/summary/SummaryReader.java | 2 +-
.../accumulo/core/util/tables/TableZooHelper.java | 13 +++------
.../accumulo/core/iterators/CombinerTestUtil.java | 2 +-
.../core/spi/fs/SpaceAwareVolumeChooserTest.java | 4 +--
pom.xml | 4 +++
.../org/apache/accumulo/server/fs/FileManager.java | 2 +-
server/manager/pom.xml | 4 +++
.../accumulo/manager/recovery/RecoveryManager.java | 18 ++++++++----
.../accumulo/tserver/TabletClientHandler.java | 2 +-
.../tserver/TabletServerResourceManager.java | 6 ++--
22 files changed, 98 insertions(+), 144 deletions(-)
diff --git a/core/src/main/java/org/apache/accumulo/core/classloader/URLContextClassLoaderFactory.java b/core/src/main/java/org/apache/accumulo/core/classloader/URLContextClassLoaderFactory.java
index ce36667a1f..26b06e39bc 100644
--- a/core/src/main/java/org/apache/accumulo/core/classloader/URLContextClassLoaderFactory.java
+++ b/core/src/main/java/org/apache/accumulo/core/classloader/URLContextClassLoaderFactory.java
@@ -22,16 +22,14 @@ import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.Arrays;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.stream.Collectors;
import org.apache.accumulo.core.spi.common.ContextClassLoaderFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
/**
* The default implementation of ContextClassLoaderFactory. This classloader returns a
@@ -49,7 +47,7 @@ public class URLContextClassLoaderFactory implements ContextClassLoaderFactory {
// Classes that are loaded contain a reference to the class loader used to load them
// so the class loader will be garbage collected when no more classes are loaded that reference it
private final Cache<String,URLClassLoader> classloaders =
- CacheBuilder.newBuilder().weakValues().build();
+ Caffeine.newBuilder().weakValues().build();
public URLContextClassLoaderFactory() {
if (!isInstantiated.compareAndSet(false, true)) {
@@ -63,19 +61,15 @@ public class URLContextClassLoaderFactory implements ContextClassLoaderFactory {
throw new IllegalArgumentException("Unknown context");
}
- try {
- return classloaders.get(context, () -> {
- LOG.debug("Creating URLClassLoader for context, uris: {}", context);
- return new URLClassLoader(Arrays.stream(context.split(",")).map(url -> {
- try {
- return new URL(url);
- } catch (MalformedURLException e) {
- throw new RuntimeException(e);
- }
- }).collect(Collectors.toList()).toArray(new URL[] {}), ClassLoader.getSystemClassLoader());
- });
- } catch (ExecutionException e) {
- throw new RuntimeException(e);
- }
+ return classloaders.get(context, k -> {
+ LOG.debug("Creating URLClassLoader for context, uris: {}", context);
+ return new URLClassLoader(Arrays.stream(context.split(",")).map(url -> {
+ try {
+ return new URL(url);
+ } catch (MalformedURLException e) {
+ throw new RuntimeException(e);
+ }
+ }).toArray(URL[]::new), ClassLoader.getSystemClassLoader());
+ });
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/BulkImport.java b/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/BulkImport.java
index 4810048e93..8b9516ad7a 100644
--- a/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/BulkImport.java
+++ b/core/src/main/java/org/apache/accumulo/core/clientImpl/bulk/BulkImport.java
@@ -85,9 +85,9 @@ import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
import com.google.common.base.Preconditions;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Sets;
public class BulkImport implements ImportDestinationArguments, ImportMappingOptions {
@@ -379,7 +379,7 @@ public class BulkImport implements ImportDestinationArguments, ImportMappingOpti
Map<String,Long> absFileLens = new HashMap<>();
fileLens.forEach((k, v) -> absFileLens.put(pathToCacheId(new Path(dir, k)), v));
- Cache<String,Long> fileLenCache = CacheBuilder.newBuilder().build();
+ Cache<String,Long> fileLenCache = Caffeine.newBuilder().build();
fileLenCache.putAll(absFileLens);
diff --git a/core/src/main/java/org/apache/accumulo/core/data/InstanceId.java b/core/src/main/java/org/apache/accumulo/core/data/InstanceId.java
index 598d35937d..70e90192ea 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/InstanceId.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/InstanceId.java
@@ -20,10 +20,9 @@ package org.apache.accumulo.core.data;
import java.util.Objects;
import java.util.UUID;
-import java.util.concurrent.ExecutionException;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
/**
* A strongly typed representation of an Accumulo instance ID. The constructor for this class will
@@ -36,7 +35,7 @@ public class InstanceId extends AbstractId<InstanceId> {
// cache is for canonicalization/deduplication of created objects,
// to limit the number of InstanceId objects in the JVM at any given moment
// WeakReferences are used because we don't need them to stick around any longer than they need to
- static final Cache<String,InstanceId> cache = CacheBuilder.newBuilder().weakValues().build();
+ static final Cache<String,InstanceId> cache = Caffeine.newBuilder().weakValues().build();
private InstanceId(String canonical) {
super(canonical);
@@ -49,12 +48,7 @@ public class InstanceId extends AbstractId<InstanceId> {
* @return InstanceId object
*/
public static InstanceId of(final String canonical) {
- try {
- return cache.get(canonical, () -> new InstanceId(canonical));
- } catch (ExecutionException e) {
- throw new AssertionError(
- "This should never happen: ID constructor should never return null.");
- }
+ return cache.get(canonical, k -> new InstanceId(canonical));
}
/**
diff --git a/core/src/main/java/org/apache/accumulo/core/data/NamespaceId.java b/core/src/main/java/org/apache/accumulo/core/data/NamespaceId.java
index 90a8c74e1b..c201113ce3 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/NamespaceId.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/NamespaceId.java
@@ -18,10 +18,8 @@
*/
package org.apache.accumulo.core.data;
-import java.util.concurrent.ExecutionException;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
/**
* A strongly typed representation of a namespace ID. This class cannot be used to get a namespace
@@ -35,7 +33,7 @@ public class NamespaceId extends AbstractId<NamespaceId> {
// cache is for canonicalization/deduplication of created objects,
// to limit the number of NamespaceId objects in the JVM at any given moment
// WeakReferences are used because we don't need them to stick around any longer than they need to
- static final Cache<String,NamespaceId> cache = CacheBuilder.newBuilder().weakValues().build();
+ static final Cache<String,NamespaceId> cache = Caffeine.newBuilder().weakValues().build();
private NamespaceId(String canonical) {
super(canonical);
@@ -48,11 +46,6 @@ public class NamespaceId extends AbstractId<NamespaceId> {
* @return NamespaceId object
*/
public static NamespaceId of(final String canonical) {
- try {
- return cache.get(canonical, () -> new NamespaceId(canonical));
- } catch (ExecutionException e) {
- throw new AssertionError(
- "This should never happen: ID constructor should never return null.");
- }
+ return cache.get(canonical, k -> new NamespaceId(canonical));
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/data/TableId.java b/core/src/main/java/org/apache/accumulo/core/data/TableId.java
index 9493d8dc16..a97c9d2f62 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/TableId.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/TableId.java
@@ -18,10 +18,8 @@
*/
package org.apache.accumulo.core.data;
-import java.util.concurrent.ExecutionException;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
/**
* A strongly typed representation of a table ID. This class cannot be used to get a table ID from a
@@ -35,7 +33,7 @@ public class TableId extends AbstractId<TableId> {
// cache is for canonicalization/deduplication of created objects,
// to limit the number of TableId objects in the JVM at any given moment
// WeakReferences are used because we don't need them to stick around any longer than they need to
- static final Cache<String,TableId> cache = CacheBuilder.newBuilder().weakValues().build();
+ static final Cache<String,TableId> cache = Caffeine.newBuilder().weakValues().build();
private TableId(final String canonical) {
super(canonical);
@@ -48,11 +46,6 @@ public class TableId extends AbstractId<TableId> {
* @return TableId object
*/
public static TableId of(final String canonical) {
- try {
- return cache.get(canonical, () -> new TableId(canonical));
- } catch (ExecutionException e) {
- throw new AssertionError(
- "This should never happen: ID constructor should never return null.");
- }
+ return cache.get(canonical, k -> new TableId(canonical));
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
index db82b0d149..46a4359cb0 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.FileOutputCommitter;
-import com.google.common.cache.Cache;
+import com.github.benmanes.caffeine.cache.Cache;
public abstract class FileOperations {
diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
index ee18347c78..f106ba8e6b 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
@@ -26,7 +26,6 @@ import java.io.UncheckedIOException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
@@ -48,7 +47,7 @@ import org.apache.hadoop.fs.Seekable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.cache.Cache;
+import com.github.benmanes.caffeine.cache.Cache;
/**
* This is a wrapper class for BCFile that includes a cache for independent caches for datablocks
@@ -170,8 +169,14 @@ public class CachableBlockFile {
private long getCachedFileLen() throws IOException {
try {
- return fileLenCache.get(cacheId, lengthSupplier::get);
- } catch (ExecutionException e) {
+ return fileLenCache.get(cacheId, k -> {
+ try {
+ return lengthSupplier.get();
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ });
+ } catch (Exception e) {
throw new IOException("Failed to get " + cacheId + " len from cache ", e);
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/CompressionAlgorithm.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/CompressionAlgorithm.java
index a82dd8a825..f76983ccc4 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/CompressionAlgorithm.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/CompressionAlgorithm.java
@@ -25,7 +25,6 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Map.Entry;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.accumulo.core.spi.file.rfile.compression.CompressionAlgorithmConfiguration;
@@ -41,9 +40,8 @@ import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import com.github.benmanes.caffeine.cache.LoadingCache;
import com.google.common.collect.Maps;
/**
@@ -113,12 +111,8 @@ public class CompressionAlgorithm extends Configured {
* Guava cache to have a limited factory pattern defined in the Algorithm enum.
*/
private static LoadingCache<Entry<CompressionAlgorithm,Integer>,CompressionCodec> codecCache =
- CacheBuilder.newBuilder().maximumSize(25).build(new CacheLoader<>() {
- @Override
- public CompressionCodec load(Entry<CompressionAlgorithm,Integer> key) {
- return key.getKey().createNewCodec(key.getValue());
- }
- });
+ Caffeine.newBuilder().maximumSize(25)
+ .build(key -> key.getKey().createNewCodec(key.getValue()));
// Data input buffer size to absorb small reads from application.
protected static final int DATA_IBUF_SIZE = 1024;
@@ -170,11 +164,7 @@ public class CompressionAlgorithm extends Configured {
// If the default buffer size is not being used, pull from the loading cache.
if (bufferSize != defaultBufferSize) {
Entry<CompressionAlgorithm,Integer> sizeOpt = Maps.immutableEntry(algorithm, bufferSize);
- try {
- codec = codecCache.get(sizeOpt);
- } catch (ExecutionException e) {
- throw new IOException(e);
- }
+ codec = codecCache.get(sizeOpt);
}
CompressionInputStream cis = codec.createInputStream(stream, decompressor);
return new BufferedInputStream(cis, DATA_IBUF_SIZE);
diff --git a/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java b/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
index 40922bda04..5548441926 100644
--- a/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
+++ b/core/src/main/java/org/apache/accumulo/core/iterators/Combiner.java
@@ -27,7 +27,6 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
-import java.util.concurrent.ExecutionException;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.IteratorSetting.Column;
@@ -43,10 +42,10 @@ import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Splitter;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
/**
@@ -187,23 +186,19 @@ public abstract class Combiner extends WrappingIterator implements OptionDescrib
@VisibleForTesting
static final Cache<String,Boolean> loggedMsgCache =
- CacheBuilder.newBuilder().expireAfterWrite(1, HOURS).maximumSize(10000).build();
+ Caffeine.newBuilder().expireAfterWrite(1, HOURS).maximumSize(10000).build();
private void sawDelete() {
if (isMajorCompaction && !reduceOnFullCompactionOnly) {
- try {
- loggedMsgCache.get(this.getClass().getName(), () -> {
- sawDeleteLog.error(
- "Combiner of type {} saw a delete during a"
- + " partial compaction. This could cause undesired results. See"
- + " ACCUMULO-2232. Will not log subsequent occurrences for at least 1 hour.",
- Combiner.this.getClass().getSimpleName());
- // the value is not used and does not matter
- return Boolean.TRUE;
- });
- } catch (ExecutionException e) {
- throw new RuntimeException(e);
- }
+ var ignored = loggedMsgCache.get(this.getClass().getName(), k -> {
+ sawDeleteLog.error(
+ "Combiner of type {} saw a delete during a"
+ + " partial compaction. This could cause undesired results. See"
+ + " ACCUMULO-2232. Will not log subsequent occurrences for at least 1 hour.",
+ Combiner.this.getClass().getSimpleName());
+ // the value is not used and does not matter
+ return Boolean.TRUE;
+ });
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/spi/balancer/HostRegexTableLoadBalancer.java b/core/src/main/java/org/apache/accumulo/core/spi/balancer/HostRegexTableLoadBalancer.java
index bba5cbfd0d..08fcc9f1a9 100644
--- a/core/src/main/java/org/apache/accumulo/core/spi/balancer/HostRegexTableLoadBalancer.java
+++ b/core/src/main/java/org/apache/accumulo/core/spi/balancer/HostRegexTableLoadBalancer.java
@@ -59,9 +59,8 @@ import org.apache.commons.lang3.builder.ToStringStyle;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import com.github.benmanes.caffeine.cache.LoadingCache;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
@@ -262,7 +261,7 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer {
}
private void checkTableConfig(TableId tableId) {
- Map<String,String> tableRegexes = tablesRegExCache.getUnchecked(tableId).get();
+ Map<String,String> tableRegexes = tablesRegExCache.get(tableId).get();
if (!hrtlbConf.get().regexes.equals(tableRegexes)) {
LoggerFactory.getLogger(HostRegexTableLoadBalancer.class).warn(
@@ -322,13 +321,8 @@ public class HostRegexTableLoadBalancer extends TableLoadBalancer {
this.hrtlbConf = balancerEnvironment.getConfiguration().getDerived(HrtlbConf::new);
tablesRegExCache =
- CacheBuilder.newBuilder().expireAfterAccess(1, HOURS).build(new CacheLoader<>() {
- @Override
- public Supplier<Map<String,String>> load(TableId key) {
- return balancerEnvironment.getConfiguration(key)
- .getDerived(HostRegexTableLoadBalancer::getRegexes);
- }
- });
+ Caffeine.newBuilder().expireAfterAccess(1, HOURS).build(key -> balancerEnvironment
+ .getConfiguration(key).getDerived(HostRegexTableLoadBalancer::getRegexes));
LOG.info("{}", this);
}
diff --git a/core/src/main/java/org/apache/accumulo/core/spi/fs/SpaceAwareVolumeChooser.java b/core/src/main/java/org/apache/accumulo/core/spi/fs/SpaceAwareVolumeChooser.java
index 20b92e001e..efea66dc7a 100644
--- a/core/src/main/java/org/apache/accumulo/core/spi/fs/SpaceAwareVolumeChooser.java
+++ b/core/src/main/java/org/apache/accumulo/core/spi/fs/SpaceAwareVolumeChooser.java
@@ -25,7 +25,6 @@ import java.security.SecureRandom;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
-import java.util.concurrent.ExecutionException;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
@@ -35,9 +34,8 @@ import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import com.github.benmanes.caffeine.cache.LoadingCache;
/**
* A {@link PreferredVolumeChooser} that takes remaining HDFS space into account when making a
@@ -68,11 +66,7 @@ public class SpaceAwareVolumeChooser extends PreferredVolumeChooser {
@Override
public String choose(VolumeChooserEnvironment env, Set<String> options) {
- try {
- return getCache(env).get(getPreferredVolumes(env, options)).next();
- } catch (ExecutionException e) {
- throw new IllegalStateException("Execution exception when attempting to cache choice", e);
- }
+ return getCache(env).get(getPreferredVolumes(env, options)).next();
}
private synchronized LoadingCache<Set<String>,WeightedRandomCollection>
@@ -84,13 +78,8 @@ public class SpaceAwareVolumeChooser extends PreferredVolumeChooser {
long computationCacheDuration = StringUtils.isNotBlank(propertyValue)
? Long.parseLong(propertyValue) : defaultComputationCacheDuration;
- choiceCache = CacheBuilder.newBuilder()
- .expireAfterWrite(computationCacheDuration, MILLISECONDS).build(new CacheLoader<>() {
- @Override
- public WeightedRandomCollection load(Set<String> key) {
- return new WeightedRandomCollection(key, env);
- }
- });
+ choiceCache = Caffeine.newBuilder().expireAfterWrite(computationCacheDuration, MILLISECONDS)
+ .build(key -> new WeightedRandomCollection(key, env));
}
return choiceCache;
diff --git a/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java b/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java
index 94822a2fa6..09bf022902 100644
--- a/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java
+++ b/core/src/main/java/org/apache/accumulo/core/summary/Gatherer.java
@@ -83,8 +83,8 @@ import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.github.benmanes.caffeine.cache.Cache;
import com.google.common.base.Preconditions;
-import com.google.common.cache.Cache;
import com.google.common.hash.Hashing;
import com.google.common.net.HostAndPort;
diff --git a/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java b/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java
index 2c83986964..d9bca52264 100644
--- a/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java
+++ b/core/src/main/java/org/apache/accumulo/core/summary/SummaryReader.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.WritableUtils;
-import com.google.common.cache.Cache;
+import com.github.benmanes.caffeine.cache.Cache;
public class SummaryReader {
diff --git a/core/src/main/java/org/apache/accumulo/core/util/tables/TableZooHelper.java b/core/src/main/java/org/apache/accumulo/core/util/tables/TableZooHelper.java
index 6345f9d5dd..6ea16a7499 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/tables/TableZooHelper.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/tables/TableZooHelper.java
@@ -25,7 +25,6 @@ import static org.apache.accumulo.core.util.Validators.EXISTING_TABLE_NAME;
import java.util.List;
import java.util.Objects;
-import java.util.concurrent.ExecutionException;
import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.client.NamespaceNotFoundException;
@@ -37,8 +36,8 @@ import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.fate.zookeeper.ZooCache;
import org.apache.accumulo.core.manager.state.tables.TableState;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
public class TableZooHelper implements AutoCloseable {
@@ -46,7 +45,7 @@ public class TableZooHelper implements AutoCloseable {
// Per instance cache will expire after 10 minutes in case we
// encounter an instance not used frequently
private final Cache<TableZooHelper,TableMap> instanceToMapCache =
- CacheBuilder.newBuilder().expireAfterAccess(10, MINUTES).build();
+ Caffeine.newBuilder().expireAfterAccess(10, MINUTES).build();
public TableZooHelper(ClientContext context) {
this.context = Objects.requireNonNull(context);
@@ -112,11 +111,7 @@ public class TableZooHelper implements AutoCloseable {
}
private TableMap getCachedTableMap() {
- try {
- return instanceToMapCache.get(this, () -> new TableMap(context));
- } catch (ExecutionException e) {
- throw new RuntimeException(e);
- }
+ return instanceToMapCache.get(this, k -> new TableMap(context));
}
public boolean tableNodeExists(TableId tableId) {
diff --git a/core/src/test/java/org/apache/accumulo/core/iterators/CombinerTestUtil.java b/core/src/test/java/org/apache/accumulo/core/iterators/CombinerTestUtil.java
index 18904cd501..39567964b5 100644
--- a/core/src/test/java/org/apache/accumulo/core/iterators/CombinerTestUtil.java
+++ b/core/src/test/java/org/apache/accumulo/core/iterators/CombinerTestUtil.java
@@ -24,6 +24,6 @@ public class CombinerTestUtil {
}
public static long cacheSize() {
- return Combiner.loggedMsgCache.size();
+ return Combiner.loggedMsgCache.estimatedSize();
}
}
diff --git a/core/src/test/java/org/apache/accumulo/core/spi/fs/SpaceAwareVolumeChooserTest.java b/core/src/test/java/org/apache/accumulo/core/spi/fs/SpaceAwareVolumeChooserTest.java
index abdc3b7ab6..663efce240 100644
--- a/core/src/test/java/org/apache/accumulo/core/spi/fs/SpaceAwareVolumeChooserTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/spi/fs/SpaceAwareVolumeChooserTest.java
@@ -31,8 +31,6 @@ import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import com.google.common.util.concurrent.UncheckedExecutionException;
-
public class SpaceAwareVolumeChooserTest {
VolumeChooserEnvironment chooserEnv = null;
@@ -124,7 +122,7 @@ public class SpaceAwareVolumeChooserTest {
@Test
public void testNoFreeSpace() {
testSpecificSetup(0L, 0L, null, 1, false);
- assertThrows(UncheckedExecutionException.class, this::makeChoices);
+ assertThrows(IllegalStateException.class, this::makeChoices);
}
@Test
diff --git a/pom.xml b/pom.xml
index 0601575f4a..27ca1c9a34 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1129,6 +1129,10 @@
<property name="format" value="&quot; [+] &quot;" />
<property name="message" value="Unnecessary concatenation of string literals" />
</module>
+ <module name="RegexpSinglelineJava">
+ <property name="format" value="com.google.common.cache[.]" />
+ <property name="message" value="Please use Caffeine Cache, not Guava" />
+ </module>
<module name="OuterTypeFilename" />
<module name="AvoidStarImport" />
<module name="NoLineWrap" />
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/FileManager.java b/server/base/src/main/java/org/apache/accumulo/server/fs/FileManager.java
index b859e7a39a..8415a988d0 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/FileManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/FileManager.java
@@ -61,7 +61,7 @@ import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.cache.Cache;
+import com.github.benmanes.caffeine.cache.Cache;
public class FileManager {
diff --git a/server/manager/pom.xml b/server/manager/pom.xml
index 988f04412d..6166045088 100644
--- a/server/manager/pom.xml
+++ b/server/manager/pom.xml
@@ -31,6 +31,10 @@
<name>Apache Accumulo Manager Server</name>
<description>The manager server for Apache Accumulo for load balancing and other system-wide operations.</description>
<dependencies>
+ <dependency>
+ <groupId>com.github.ben-manes.caffeine</groupId>
+ <artifactId>caffeine</artifactId>
+ </dependency>
<dependency>
<groupId>com.google.auto.service</groupId>
<artifactId>auto-service</artifactId>
diff --git a/server/manager/src/main/java/org/apache/accumulo/manager/recovery/RecoveryManager.java b/server/manager/src/main/java/org/apache/accumulo/manager/recovery/RecoveryManager.java
index 7f0e47448d..cb0fd76dc2 100644
--- a/server/manager/src/main/java/org/apache/accumulo/manager/recovery/RecoveryManager.java
+++ b/server/manager/src/main/java/org/apache/accumulo/manager/recovery/RecoveryManager.java
@@ -22,13 +22,13 @@ import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.io.UncheckedIOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
@@ -52,8 +52,8 @@ import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
public class RecoveryManager {
@@ -70,7 +70,7 @@ public class RecoveryManager {
public RecoveryManager(Manager manager, long timeToCacheExistsInMillis) {
this.manager = manager;
existenceCache =
- CacheBuilder.newBuilder().expireAfterWrite(timeToCacheExistsInMillis, TimeUnit.MILLISECONDS)
+ Caffeine.newBuilder().expireAfterWrite(timeToCacheExistsInMillis, TimeUnit.MILLISECONDS)
.maximumWeight(10_000_000).weigher((path, exist) -> path.toString().length()).build();
executor = ThreadPools.getServerThreadPools().createScheduledExecutorService(4,
@@ -144,8 +144,14 @@ public class RecoveryManager {
private boolean exists(final Path path) throws IOException {
try {
- return existenceCache.get(path, () -> manager.getVolumeManager().exists(path));
- } catch (ExecutionException e) {
+ return existenceCache.get(path, k -> {
+ try {
+ return manager.getVolumeManager().exists(path);
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ });
+ } catch (Exception e) {
throw new IOException(e);
}
}
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletClientHandler.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletClientHandler.java
index 09aa2169c4..4e78024a12 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletClientHandler.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletClientHandler.java
@@ -133,7 +133,7 @@ import org.apache.zookeeper.KeeperException.NoNodeException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.cache.Cache;
+import com.github.benmanes.caffeine.cache.Cache;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.context.Scope;
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
index 96183a8e5b..0c4c11ba58 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletServerResourceManager.java
@@ -79,10 +79,10 @@ import org.apache.accumulo.tserver.tablet.Tablet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Suppliers;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
@@ -365,7 +365,7 @@ public class TabletServerResourceManager {
int maxOpenFiles = acuConf.getCount(Property.TSERV_SCAN_MAX_OPENFILES);
fileLenCache =
- CacheBuilder.newBuilder().maximumSize(Math.min(maxOpenFiles * 1000L, 100_000)).build();
+ Caffeine.newBuilder().maximumSize(Math.min(maxOpenFiles * 1000L, 100_000)).build();
fileManager = new FileManager(context, maxOpenFiles, fileLenCache);