(accumulo-access) branch main updated: Modified Authorizations.of to only accept a Set (#68)

2024-03-12 Thread dlmarion
This is an automated email from the ASF dual-hosted git repository.

dlmarion pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo-access.git


The following commit(s) were added to refs/heads/main by this push:
 new 33be659  Modified Authorizations.of to only accept a Set (#68)
33be659 is described below

commit 33be6595160d0f62041731a720cc2b80f898c243
Author: Dave Marion 
AuthorDate: Tue Mar 12 13:47:23 2024 -0400

Modified Authorizations.of to only accept a Set (#68)

Closes #66

-

Co-authored-by: Keith Turner 
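A minimal usage sketch of the API shape after this change (the wrapper class and main method are illustrative, not part of the commit): callers now pass a java.util.Set instead of varargs.

import java.util.Set;
import org.apache.accumulo.access.Authorizations;

public class AuthorizationsOfExample {
  public static void main(String[] args) {
    // Before this change (varargs form, now removed):
    //   Authorizations auths = Authorizations.of("A", "C");
    // After this change, a Set is required:
    Authorizations auths = Authorizations.of(Set.of("A", "C"));
    System.out.println(auths);
  }
}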
---
 .../antlr/AccessExpressionAntlrBenchmark.java  |  3 +-
 .../accumulo/access/grammar/antlr/Antlr4Tests.java |  7 ++--
 .../apache/accumulo/access/AccessEvaluator.java|  7 
 .../accumulo/access/AccessEvaluatorImpl.java   |  8 
 .../accumulo/access/AccessExpressionImpl.java  |  2 +-
 .../org/apache/accumulo/access/Authorizations.java | 21 ++
 src/test/java/example/AccessExample.java   |  4 +-
 .../accumulo/access/AccessEvaluatorTest.java   | 18 +
 .../accumulo/access/AccessExpressionBenchmark.java |  7 ++--
 .../apache/accumulo/access/AuthorizationTest.java  | 45 ++
 10 files changed, 84 insertions(+), 38 deletions(-)

diff --git a/src/it/antlr4-example/src/test/java/org/apache/accumulo/access/grammar/antlr/AccessExpressionAntlrBenchmark.java b/src/it/antlr4-example/src/test/java/org/apache/accumulo/access/grammar/antlr/AccessExpressionAntlrBenchmark.java
index 0c3422f..75ec0a7 100644
--- a/src/it/antlr4-example/src/test/java/org/apache/accumulo/access/grammar/antlr/AccessExpressionAntlrBenchmark.java
+++ b/src/it/antlr4-example/src/test/java/org/apache/accumulo/access/grammar/antlr/AccessExpressionAntlrBenchmark.java
@@ -25,6 +25,7 @@ import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
+import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
@@ -86,7 +87,7 @@ public class AccessExpressionAntlrBenchmark {
 et.expressions = new ArrayList<>();
 
 et.evaluator = new AccessExpressionAntlrEvaluator(
-Stream.of(testDataSet.auths).map(Authorizations::of).collect(Collectors.toList()));
+Stream.of(testDataSet.auths).map(a -> Authorizations.of(Set.of(a))).collect(Collectors.toList()));
 
 for (var tests : testDataSet.tests) {
   if (tests.expectedResult != TestDataLoader.ExpectedResult.ERROR) {
diff --git a/src/it/antlr4-example/src/test/java/org/apache/accumulo/access/grammar/antlr/Antlr4Tests.java b/src/it/antlr4-example/src/test/java/org/apache/accumulo/access/grammar/antlr/Antlr4Tests.java
index be40f3b..09870ae 100644
--- a/src/it/antlr4-example/src/test/java/org/apache/accumulo/access/grammar/antlr/Antlr4Tests.java
+++ b/src/it/antlr4-example/src/test/java/org/apache/accumulo/access/grammar/antlr/Antlr4Tests.java
@@ -27,6 +27,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.List;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
@@ -120,7 +121,7 @@ public class Antlr4Tests {
   @Test
   public void testSimpleEvaluation() throws Exception {
 String accessExpression = "(one)|(foo)";
-Authorizations auths = Authorizations.of("four", "three", "one", "two");
+Authorizations auths = Authorizations.of(Set.of("four", "three", "one", "two"));
 AccessExpressionAntlrEvaluator eval = new AccessExpressionAntlrEvaluator(List.of(auths));
 assertTrue(eval.canAccess(accessExpression));
   }
@@ -128,7 +129,7 @@ public class Antlr4Tests {
   @Test
   public void testSimpleEvaluationFailure() throws Exception {
 String accessExpression = "(A)";
-Authorizations auths = Authorizations.of("A", "C");
+Authorizations auths = Authorizations.of(Set.of("A", "C"));
 AccessExpressionAntlrEvaluator eval = new AccessExpressionAntlrEvaluator(List.of(auths));
 assertFalse(eval.canAccess(accessExpression));
   }
@@ -141,7 +142,7 @@ public class Antlr4Tests {
 for (TestDataSet testSet : testData) {
 
   List<Authorizations> authSets =
-      Stream.of(testSet.auths).map(Authorizations::of).collect(Collectors.toList());
+      Stream.of(testSet.auths).map(a -> Authorizations.of(Set.of(a))).collect(Collectors.toList());
   AccessEvaluator evaluator = AccessEvaluator.of(authSets);
   AccessExpressionAntlrEvaluator antlr = new AccessExpressionAntlrEvaluator(authSets);
 
diff --git a/src/main/java/org/apache/accumulo/access/AccessEvaluator.java b/src/main/java/org/apache/accumulo/access/AccessEvaluator.java
index 1ea4d23..3eff0d1 100644
--- a/src/main/java/org/apache/accumulo/access/AccessEvaluator.java
+++ b/src/main/java/org/apache/accumulo/access/AccessEvaluator.java
@@ -152,13 +152,6 @@ public interface AccessEvaluator {
 

(accumulo) 01/01: Merge branch '2.1'

2024-03-12 Thread ddanielr
This is an automated email from the ASF dual-hosted git repository.

ddanielr pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit 171a1e144caa9d2eb382846d2dc45a424487d3ec
Merge: 162b8effb1 92331ea113
Author: Daniel Roberts 
AuthorDate: Tue Mar 12 15:17:40 2024 +

Merge branch '2.1'

 .../accumulo/core/file/rfile/GenerateSplits.java   | 32 +--
 .../core/file/rfile/GenerateSplitsTest.java| 64 +++---
 2 files changed, 82 insertions(+), 14 deletions(-)

diff --cc core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java
index 5928ade21d,865210a970..7ee151f0a2
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java
@@@ -137,19 -143,33 +144,33 @@@ public class GenerateSplits implements KeywordExecutable {
Path path = new Path(file);
fs = PrintInfo.resolveFS(log, hadoopConf, path);
// get all the files in the directory
 -  filePaths.addAll(getFiles(fs, path));
 +  files.addAll(getFiles(fs, path));
  }
  
 -if (filePaths.isEmpty()) {
 +if (files.isEmpty()) {
   throw new IllegalArgumentException("No files were found in " + opts.files);
  } else {
 -  log.trace("Found the following files: {}", filePaths);
 +  log.trace("Found the following files: {}", files);
  }
  
+ if (!encode) {
+   // Generate the allowed Character set
+   for (int i = 0; i < 10; i++) {
+ // 0-9
+ allowedChars.add((char) (i + 48));
+   }
+   for (int i = 0; i < 26; i++) {
+ // Uppercase A-Z
+ allowedChars.add((char) (i + 65));
+ // Lowercase a-z
+ allowedChars.add((char) (i + 97));
+   }
+ }
+ 
  // if no size specified look at indexed keys first
  if (opts.splitSize == 0) {
 -  splits = getIndexKeys(siteConf, hadoopConf, fs, filePaths, requestedNumSplits, encode,
 -  cryptoService);
 +  splits =
 +  getIndexKeys(siteConf, hadoopConf, fs, files, requestedNumSplits, encode, cryptoService);
// if there weren't enough splits indexed, try again with size = 0
if (splits.size() < requestedNumSplits) {
 log.info("Only found {} indexed keys but need {}. Doing a full scan on files {}",



(accumulo) branch main updated (162b8effb1 -> 171a1e144c)

2024-03-12 Thread ddanielr
This is an automated email from the ASF dual-hosted git repository.

ddanielr pushed a change to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


from 162b8effb1 Replace long + TimeUnit with Duration in ReadOnlyTStore.unreserve() (#4358)
 add 92331ea113 Throw error when non-standard chars exist (#4348)
 new 171a1e144c Merge branch '2.1'

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../accumulo/core/file/rfile/GenerateSplits.java   | 32 +--
 .../core/file/rfile/GenerateSplitsTest.java| 64 +++---
 2 files changed, 82 insertions(+), 14 deletions(-)



(accumulo) branch elasticity updated: partially avoids splitting and compacting offline tablets (#4343)

2024-03-12 Thread kturner
This is an automated email from the ASF dual-hosted git repository.

kturner pushed a commit to branch elasticity
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/elasticity by this push:
 new 744dc5286d partially avoids splitting and compacting offline tablets (#4343)
744dc5286d is described below

commit 744dc5286ddc92cf8007c46d664e1deef9afe56d
Author: Keith Turner 
AuthorDate: Tue Mar 12 11:18:46 2024 -0400

partially avoids splitting and compacting offline tablets (#4343)

Modifies TabletManagementIterator so that it does not return tablets
that need a split or compaction if the table is offline.  This
partially prevents those tablets from splitting and compacting.  Still
need to handle things that were queued for split or compaction when a
table is taken offline.  Once those other changes are made, this change
will still avoid queuing and processing tablets for work that does not
need to be done.
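
A minimal sketch of the guard this commit adds before split/compaction checks. TabletInfo and MgmtParams below are hypothetical stand-ins for the real tablet metadata and management-parameter types, not the project's classes.

import java.util.Collections;
import java.util.Set;

public class OfflineTabletGuardSketch {
  // Hypothetical stand-ins for the real tablet metadata and management parameters.
  interface TabletInfo { Object operationId(); String tableId(); }
  interface MgmtParams { boolean isTableOnline(String tableId); }

  // A tablet is considered for split/compaction only when no operation owns it,
  // its table is online, and none of the blocking reasons apply.
  static boolean mayConsiderSplitOrCompact(TabletInfo tm, MgmtParams params,
      Set<String> blockingReasons, Set<String> reasonsForThisTablet) {
    return tm.operationId() == null
        && params.isTableOnline(tm.tableId())
        && Collections.disjoint(blockingReasons, reasonsForThisTablet);
  }
}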
---
 .../manager/state/TabletManagementIterator.java|  2 +-
 .../functional/TabletManagementIteratorIT.java | 29 +++---
 2 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/server/base/src/main/java/org/apache/accumulo/server/manager/state/TabletManagementIterator.java b/server/base/src/main/java/org/apache/accumulo/server/manager/state/TabletManagementIterator.java
index 4704f691c8..2e6627c78e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/manager/state/TabletManagementIterator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/manager/state/TabletManagementIterator.java
@@ -270,7 +270,7 @@ public class TabletManagementIterator extends SkippingIterator {
   reasonsToReturnThisTablet.add(ManagementAction.NEEDS_LOCATION_UPDATE);
 }
 
-if (tm.getOperationId() == null
+if (tm.getOperationId() == null && tabletMgmtParams.isTableOnline(tm.getTableId())
 && Collections.disjoint(REASONS_NOT_TO_SPLIT_OR_COMPACT, reasonsToReturnThisTablet)) {
   try {
 if (shouldReturnDueToSplit(tm, this.env.getPluginEnv().getConfiguration(tm.getTableId()))) {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/TabletManagementIteratorIT.java b/test/src/main/java/org/apache/accumulo/test/functional/TabletManagementIteratorIT.java
index c88989d1c4..34723f3fae 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/TabletManagementIteratorIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/TabletManagementIteratorIT.java
@@ -50,6 +50,7 @@ import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.client.admin.TabletAvailability;
 import org.apache.accumulo.core.clientImpl.ClientContext;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
@@ -112,7 +113,7 @@ public class TabletManagementIteratorIT extends AccumuloClusterHarness {
 
 try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
 
-  String[] tables = getUniqueNames(9);
+  String[] tables = getUniqueNames(10);
   final String t1 = tables[0];
   final String t2 = tables[1];
   final String t3 = tables[2];
@@ -122,6 +123,7 @@ public class TabletManagementIteratorIT extends AccumuloClusterHarness {
   final String metaCopy3 = tables[6];
   final String metaCopy4 = tables[7];
   final String metaCopy5 = tables[8];
+  final String metaCopy6 = tables[9];
 
   // create some metadata
   createTable(client, t1, true);
@@ -156,6 +158,7 @@ public class TabletManagementIteratorIT extends AccumuloClusterHarness {
   copyTable(client, metaCopy1, metaCopy3);
   copyTable(client, metaCopy1, metaCopy4);
   copyTable(client, metaCopy1, metaCopy5);
+  copyTable(client, metaCopy1, metaCopy6);
 
   // t1 is unassigned, setting to always will generate a change to host tablets
   setTabletAvailability(client, metaCopy1, t1, TabletAvailability.HOSTED.name());
@@ -240,8 +243,27 @@ public class TabletManagementIteratorIT extends AccumuloClusterHarness {
   assertEquals(1, findTabletsNeedingAttention(client, metaCopy4, tabletMgmtParams),
       "Should have one tablet that needs a volume replacement");
 
+  // In preparation for split and offline testing, ensure nothing needs attention
+  tabletMgmtParams = createParameters(client);
+  addFiles(client, metaCopy6, t4);
+  assertEquals(0, findTabletsNeedingAttention(client, metaCopy6, tabletMgmtParams),
+  "No tablets should need attention");
+  // Lower the split threshold for the table; this should cause the files added to need attention.
+  client.tableOperations().setProperty(tables[3], Property.TABLE_SPLIT_THRESHOLD.getKey(),
+   

(accumulo) branch 2.1 updated: Throw error when non-standard chars exist (#4348)

2024-03-12 Thread ddanielr
This is an automated email from the ASF dual-hosted git repository.

ddanielr pushed a commit to branch 2.1
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/2.1 by this push:
 new 92331ea113 Throw error when non-standard chars exist (#4348)
92331ea113 is described below

commit 92331ea113b12c0a833a0a5ea714294832c6d822
Author: Daniel Roberts 
AuthorDate: Tue Mar 12 11:12:53 2024 -0400

Throw error when non-standard chars exist (#4348)

* Throw error when non-standard chars exist

Adds an explicit error message when base64 encoding should have been used.
Removes special handling of the `\` character and replaces it with an
allowed character set.

-

Co-authored-by: Keith Turner 
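
A standalone sketch (not the tool's code; class and method names are illustrative) of the behavior described above: split points are rendered verbatim only when every byte falls within the allowed 0-9/A-Z/a-z set, and anything else now throws instead of being silently dropped, pointing the user at Base64 output.

import java.util.HashSet;
import java.util.Set;

public class SplitRenderSketch {
  private static final Set<Character> ALLOWED = new HashSet<>();
  static {
    for (char c = '0'; c <= '9'; c++) {
      ALLOWED.add(c);                          // digits 0-9
    }
    for (char c = 'A'; c <= 'Z'; c++) {
      ALLOWED.add(c);                          // uppercase A-Z
      ALLOWED.add(Character.toLowerCase(c));   // lowercase a-z
    }
  }

  static String render(byte[] split) {
    StringBuilder sb = new StringBuilder();
    for (byte b : split) {
      char c = (char) (0xff & b);
      if (ALLOWED.contains(c)) {
        sb.append(c);
      } else {
        // Previously such bytes were dropped; the tool now fails fast instead.
        throw new UnsupportedOperationException("Non printable char: \\x"
            + Integer.toHexString(c) + " detected. Must use Base64 encoded output.");
      }
    }
    return sb.toString();
  }
}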
---
 .../accumulo/core/file/rfile/GenerateSplits.java   | 32 +--
 .../core/file/rfile/GenerateSplitsTest.java| 64 +++---
 2 files changed, 82 insertions(+), 14 deletions(-)

diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java
index 1812358a59..865210a970 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/GenerateSplits.java
@@ -30,6 +30,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Base64;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
@@ -72,6 +73,10 @@ import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 public class GenerateSplits implements KeywordExecutable {
  private static final Logger log = LoggerFactory.getLogger(GenerateSplits.class);
 
+  private static final Set<Character> allowedChars = new HashSet<>();
+
+  private static final String encodeFlag = "-b64";
+
   static class Opts extends ConfigOpts {
 @Parameter(names = {"-n", "--num"},
 description = "The number of split points to generate. Can be used to 
create n+1 tablets. Cannot use with the split size option.")
@@ -81,7 +86,8 @@ public class GenerateSplits implements KeywordExecutable {
 description = "The minimum split size in uncompressed bytes. Cannot 
use with num splits option.")
 public long splitSize = 0;
 
-@Parameter(names = {"-b64", "--base64encoded"}, description = "Base 64 
encode the split points")
+@Parameter(names = {encodeFlag, "--base64encoded"},
+description = "Base 64 encode the split points")
 public boolean base64encode = false;
 
 @Parameter(names = {"-sf", "--splits-file"}, description = "Output the 
splits to a file")
@@ -89,6 +95,7 @@ public class GenerateSplits implements KeywordExecutable {
 
 @Parameter(description = "[ ...] -n  | -ss ")
 public List<String> files = new ArrayList<>();
+
   }
 
   @Override
@@ -145,6 +152,20 @@ public class GenerateSplits implements KeywordExecutable {
   log.trace("Found the following files: {}", filePaths);
 }
 
+if (!encode) {
+  // Generate the allowed Character set
+  for (int i = 0; i < 10; i++) {
+// 0-9
+allowedChars.add((char) (i + 48));
+  }
+  for (int i = 0; i < 26; i++) {
+// Uppercase A-Z
+allowedChars.add((char) (i + 65));
+// Lowercase a-z
+allowedChars.add((char) (i + 97));
+  }
+}
+
 // if no size specified look at indexed keys first
 if (opts.splitSize == 0) {
   splits = getIndexKeys(siteConf, hadoopConf, fs, filePaths, requestedNumSplits, encode,
@@ -256,16 +277,15 @@ public class GenerateSplits implements KeywordExecutable {
 if (encode) {
   return Base64.getEncoder().encodeToString(bytes);
 } else {
-  // drop non printable characters
   StringBuilder sb = new StringBuilder();
   for (byte aByte : bytes) {
 int c = 0xff & aByte;
-if (c == '\\') {
-  sb.append("");
-} else if (c >= 32 && c <= 126) {
+if (allowedChars.contains((char) c)) {
   sb.append((char) c);
 } else {
-  log.debug("Dropping non printable char: \\x{}", Integer.toHexString(c));
+  // Fail if non-printable characters are detected.
+  throw new UnsupportedOperationException("Non printable char: \\x" + Integer.toHexString(c)
+  + " detected. Must use Base64 encoded output.  The behavior around non printable chars changed in 2.1.3 to throw an error, the previous behavior was likely to cause bugs.");
 }
   }
   return sb.toString();
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/GenerateSplitsTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/GenerateSplitsTest.java
index d2b7a8577e..b0111bbe2a 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/GenerateSplitsTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/GenerateSplitsTest.java

(accumulo) branch main updated: Replace long + TimeUnit with Duration in ReadOnlyTStore.unreserve() (#4358)

2024-03-12 Thread domgarguilo
This is an automated email from the ASF dual-hosted git repository.

domgarguilo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/main by this push:
 new 162b8effb1 Replace long + TimeUnit with Duration in ReadOnlyTStore.unreserve() (#4358)
162b8effb1 is described below

commit 162b8effb13aacf864c0db6ccefb53bd3f652fa3
Author: Dom G 
AuthorDate: Tue Mar 12 09:24:57 2024 -0400

Replace long + TimeUnit with Duration in ReadOnlyTStore.unreserve() (#4358)
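
A small sketch of the signature change applied across the FATE stores: callers now pass a java.time.Duration rather than a long plus TimeUnit. The Store interface below is a hypothetical stand-in mirroring the changed method shape, not the project's ReadOnlyTStore.

import java.time.Duration;

public class UnreserveDurationSketch {
  // Hypothetical stand-in for the changed store method.
  interface Store {
    void unreserve(long tid, Duration deferTime);
  }

  static void releaseNow(Store store, long txid) {
    // Before: store.unreserve(txid, 0, TimeUnit.MILLISECONDS);
    // After:
    store.unreserve(txid, Duration.ZERO);
  }
}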
---
 .../org/apache/accumulo/core/fate/AdminUtil.java   |  8 
 .../org/apache/accumulo/core/fate/AgeOffStore.java | 10 -
 .../java/org/apache/accumulo/core/fate/Fate.java   | 14 ++---
 .../apache/accumulo/core/fate/ReadOnlyTStore.java  |  5 ++---
 .../org/apache/accumulo/core/fate/ZooStore.java| 24 ++
 .../apache/accumulo/core/logging/FateLogger.java   |  6 +++---
 .../apache/accumulo/core/fate/AgeOffStoreTest.java | 18 
 .../org/apache/accumulo/core/fate/TestStore.java   |  4 ++--
 8 files changed, 43 insertions(+), 46 deletions(-)

diff --git a/core/src/main/java/org/apache/accumulo/core/fate/AdminUtil.java b/core/src/main/java/org/apache/accumulo/core/fate/AdminUtil.java
index 858e6e6998..7cc0a9c004 100644
--- a/core/src/main/java/org/apache/accumulo/core/fate/AdminUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/fate/AdminUtil.java
@@ -20,6 +20,7 @@ package org.apache.accumulo.core.fate;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
+import java.time.Duration;
 import java.time.ZoneOffset;
 import java.time.format.DateTimeFormatter;
 import java.util.ArrayList;
@@ -32,7 +33,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus;
 import org.apache.accumulo.core.fate.zookeeper.FateLock;
@@ -368,7 +368,7 @@ public class AdminUtil {
 
   long timeCreated = zs.timeCreated(tid);
 
-  zs.unreserve(tid, 0, TimeUnit.MILLISECONDS);
+  zs.unreserve(tid, Duration.ZERO);
 
   if (includeByStatus(status, filterStatus) && includeByTxid(tid, filterTxid)) {
 statuses.add(new TransactionStatus(tid, status, txName, hlocks, wlocks, top, timeCreated));
@@ -451,7 +451,7 @@ public class AdminUtil {
 break;
 }
 
-zs.unreserve(txid, 0, TimeUnit.MILLISECONDS);
+zs.unreserve(txid, Duration.ZERO);
 return state;
   }
 
@@ -495,7 +495,7 @@ public class AdminUtil {
 break;
 }
 
-zs.unreserve(txid, 0, TimeUnit.MILLISECONDS);
+zs.unreserve(txid, Duration.ZERO);
 return state;
   }
 
diff --git a/core/src/main/java/org/apache/accumulo/core/fate/AgeOffStore.java b/core/src/main/java/org/apache/accumulo/core/fate/AgeOffStore.java
index ca016d0c9c..bd2bd5208b 100644
--- a/core/src/main/java/org/apache/accumulo/core/fate/AgeOffStore.java
+++ b/core/src/main/java/org/apache/accumulo/core/fate/AgeOffStore.java
@@ -19,13 +19,13 @@
 package org.apache.accumulo.core.fate;
 
 import java.io.Serializable;
+import java.time.Duration;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -108,7 +108,7 @@ public class AgeOffStore implements TStore {
   }
 
 } finally {
-  store.unreserve(txid, 0, TimeUnit.MILLISECONDS);
+  store.unreserve(txid, Duration.ZERO);
 }
   } catch (Exception e) {
 log.warn("Failed to age off FATE tx " + FateTxId.formatTid(txid), e);
@@ -138,7 +138,7 @@ public class AgeOffStore implements TStore {
 break;
 }
   } finally {
-store.unreserve(txid, 0, TimeUnit.MILLISECONDS);
+store.unreserve(txid, Duration.ZERO);
   }
 }
   }
@@ -166,8 +166,8 @@ public class AgeOffStore implements TStore {
   }
 
   @Override
-  public void unreserve(long tid, long deferTime, TimeUnit deferTimeUnit) {
-store.unreserve(tid, deferTime, deferTimeUnit);
+  public void unreserve(long tid, Duration deferTime) {
+store.unreserve(tid, deferTime);
   }
 
   @Override
diff --git a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java
index 1a14418b1a..4fe07bb8b2 100644
--- a/core/src/main/java/org/apache/accumulo/core/fate/Fate.java
+++ b/core/src/main/java/org/apache/accumulo/core/fate/Fate.java
@@ -30,12 +30,12 @@ import static org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus.SUCCESSFUL;
 import static org.apache.accumulo.core.fate.ReadOnlyTStore.TStatus.UNKNOWN;
 import static org.apache.accumulo.core.util.ShutdownUtil.isIOException;
 
+import java.time.Duration;
 import java.util.EnumSet;
 import 

(accumulo) branch elasticity updated: Refactor classes to use the Caches object (#4359)

2024-03-12 Thread dlmarion
This is an automated email from the ASF dual-hosted git repository.

dlmarion pushed a commit to branch elasticity
in repository https://gitbox.apache.org/repos/asf/accumulo.git


The following commit(s) were added to refs/heads/elasticity by this push:
 new 7ea10de6c8 Refactor classes to use the Caches object (#4359)
7ea10de6c8 is described below

commit 7ea10de6c8af6a8c79e0ae6aadcc491566e75cee
Author: Dave Marion 
AuthorDate: Tue Mar 12 08:10:10 2024 -0400

Refactor classes to use the Caches object (#4359)
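
A hypothetical, self-contained sketch of the pattern this refactor moves toward: a shared registry hands out Caffeine builders keyed by a cache name, so caches are created and tracked in one place. CacheRegistry and CacheName below are illustrative stand-ins, not the project's Caches API.

import java.util.concurrent.TimeUnit;

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

public class NamedCacheSketch {
  enum CacheName { COMPACTION_SERVICE_UNKNOWN }

  static class CacheRegistry {
    private static final CacheRegistry INSTANCE = new CacheRegistry();
    static CacheRegistry getInstance() { return INSTANCE; }

    // The real Caches class can also hook metrics here, keyed by the cache name.
    Caffeine<Object, Object> createNewBuilder(CacheName name, boolean recordStats) {
      Caffeine<Object, Object> builder = Caffeine.newBuilder();
      return recordStats ? builder.recordStats() : builder;
    }
  }

  public static void main(String[] args) {
    Cache<Long, String> errors = CacheRegistry.getInstance()
        .createNewBuilder(CacheName.COMPACTION_SERVICE_UNKNOWN, false)
        .expireAfterWrite(5, TimeUnit.MINUTES).build();
    errors.put(42L, "compaction service not found");
  }
}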
---
 .../java/org/apache/accumulo/core/util/cache/Caches.java   |  7 ++-
 .../accumulo/server/compaction/CompactionJobGenerator.java |  4 ++--
 .../accumulo/server/conf/ServerConfigurationFactory.java   | 14 --
 .../org/apache/accumulo/server/fs/VolumeManagerImpl.java   |  6 --
 4 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/core/src/main/java/org/apache/accumulo/core/util/cache/Caches.java b/core/src/main/java/org/apache/accumulo/core/util/cache/Caches.java
index a96af36bc5..f5ef8e4c8f 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/cache/Caches.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/cache/Caches.java
@@ -42,12 +42,14 @@ public class Caches implements MetricsProducer {
 COMPACTION_CONFIGS,
 COMPACTION_DIR_CACHE,
 COMPACTION_DISPATCHERS,
+COMPACTION_SERVICE_UNKNOWN,
 COMPACTOR_GROUP_ID,
 COMPRESSION_ALGORITHM,
 CRYPT_PASSWORDS,
 HOST_REGEX_BALANCER_TABLE_REGEX,
 INSTANCE_ID,
 NAMESPACE_ID,
+NAMESPACE_CONFIGS,
 PROP_CACHE,
 RECOVERY_MANAGER_PATH_CACHE,
 SCAN_SERVER_TABLET_METADATA,
@@ -56,10 +58,13 @@ public class Caches implements MetricsProducer {
 SPLITTER_FILES,
 SPLITTER_STARTING,
 SPLITTER_UNSPLITTABLE,
+TABLE_CONFIGS,
 TABLE_ID,
+TABLE_PARENT_CONFIGS,
 TABLE_ZOO_HELPER_CACHE,
 TSRM_FILE_LENGTHS,
-TINYLFU_BLOCK_CACHE;
+TINYLFU_BLOCK_CACHE,
+VOLUME_HDFS_CONFIGS;
   }
 
   private static final Logger LOG = LoggerFactory.getLogger(Caches.class);
diff --git a/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionJobGenerator.java b/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionJobGenerator.java
index 02e3dc2fca..1d88de2eaa 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionJobGenerator.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionJobGenerator.java
@@ -55,7 +55,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.github.benmanes.caffeine.cache.Cache;
-import com.github.benmanes.caffeine.cache.Caffeine;
 
 public class CompactionJobGenerator {
  private static final Logger log = LoggerFactory.getLogger(CompactionJobGenerator.class);
@@ -86,7 +85,8 @@ public class CompactionJobGenerator {
   v.isEmpty() ? Map.of() : Collections.unmodifiableMap(v)));
 }
 unknownCompactionServiceErrorCache =
-Caffeine.newBuilder().expireAfterWrite(5, TimeUnit.MINUTES).build();
+Caches.getInstance().createNewBuilder(CacheName.COMPACTION_SERVICE_UNKNOWN, false)
+.expireAfterWrite(5, TimeUnit.MINUTES).build();
   }
 
   public Collection<CompactionJob> generateJobs(TabletMetadata tablet, Set<CompactionKind> kinds) {
diff --git a/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java b/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
index 78b1e4f18b..c6f32946c3 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/conf/ServerConfigurationFactory.java
@@ -36,6 +36,8 @@ import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.core.data.NamespaceId;
 import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.util.cache.Caches;
+import org.apache.accumulo.core.util.cache.Caches.CacheName;
 import org.apache.accumulo.core.util.threads.ThreadPools;
 import org.apache.accumulo.core.util.threads.Threads;
 import org.apache.accumulo.server.ServerContext;
@@ -49,7 +51,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.github.benmanes.caffeine.cache.Cache;
-import com.github.benmanes.caffeine.cache.Caffeine;
 
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 
@@ -80,11 +81,12 @@ public class ServerConfigurationFactory extends ServerConfiguration {
 this.systemConfig = memoize(() -> new SystemConfiguration(context,
 SystemPropKey.of(context.getInstanceID()), siteConfig));
 tableParentConfigs =
-Caffeine.newBuilder().expireAfterAccess(CACHE_EXPIRATION_HRS, TimeUnit.HOURS).build();
-tableConfigs =
-Caffeine.newBuilder().expireAfterAccess(CACHE_EXPIRATION_HRS, TimeUnit.HOURS).build();
-namespaceConfigs =
-