This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new a14b395cbc HDDS-12922. Use OMDBDefinition in GeneratorOm and FSORepairTool (#8355)
a14b395cbc is described below
commit a14b395cbc9dbcf55c887d4f743df42cdff580e1
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Tue Apr 29 01:58:31 2025 -0700
HDDS-12922. Use OMDBDefinition in GeneratorOm and FSORepairTool (#8355)
---
.../org/apache/hadoop/hdds/utils/db/DBStore.java | 27 ---------------
.../org/apache/hadoop/hdds/utils/db/RDBStore.java | 16 ---------
.../apache/hadoop/hdds/utils/db/TypedTable.java | 28 +--------------
.../hadoop/hdds/utils/db/TestRDBTableStore.java | 13 ++++---
.../hdds/utils/db/TestTypedRDBTableStore.java | 15 +++-----
.../recon/recovery/ReconOmMetadataManagerImpl.java | 3 +-
.../freon/containergenerator/GeneratorOm.java | 17 +++------
.../hadoop/ozone/repair/om/FSORepairTool.java | 40 +++++++++-------------
8 files changed, 36 insertions(+), 123 deletions(-)
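For context: the removed DBStore/RDBStore overloads took Class tokens and resolved codecs reflectively through a CodecRegistry, while the surviving API takes codecs directly; OM callers can instead go through the predefined column families in OMDBDefinition. Below is a minimal sketch of the two replacement patterns, mirroring the calls in the diff that follows (the wrapper class and method are illustrative only, not part of the commit):

    import java.io.IOException;
    import org.apache.hadoop.hdds.utils.db.ByteArrayCodec;
    import org.apache.hadoop.hdds.utils.db.DBStore;
    import org.apache.hadoop.hdds.utils.db.StringCodec;
    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

    // Illustrative sketch only; assumes an already-opened DBStore.
    class GetTableExamples {
      static void examples(DBStore store) throws IOException {
        // Before (overloads removed by this commit):
        //   store.getTable("reachable", String.class, byte[].class);
        // After, option 1: pass the key/value codecs explicitly.
        Table<String, byte[]> reachable =
            store.getTable("reachable", StringCodec.get(), ByteArrayCodec.get());
        // After, option 2: use a predefined OM column-family definition,
        // which bundles the table name and both codecs.
        Table<String, OmKeyInfo> keyTable = OMDBDefinition.KEY_TABLE.getTable(store);
      }
    }

Either path drops the reflective CodecRegistry lookup; the OMDBDefinition route also keeps each table's name and codecs defined in a single place.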
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
index eae3148ae0..0b61822fb5 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
@@ -52,33 +52,6 @@ default <KEY, VALUE> TypedTable<KEY, VALUE> getTable(String name, Codec<KEY> key
return getTable(name, keyCodec, valueCodec, CacheType.PARTIAL_CACHE);
}
- /**
- * Gets an existing TableStore with implicit key/value conversion and
- * with default cache type for cache. Default cache type is partial cache.
- *
- * @param name - Name of the TableStore to get
- * @param keyType
- * @param valueType
- * @return - TableStore.
- * @throws IOException on Failure
- */
- <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
- Class<KEY> keyType, Class<VALUE> valueType) throws IOException;
-
- /**
- * Gets an existing TableStore with implicit key/value conversion and
- * with specified cache type.
- * @param name - Name of the TableStore to get
- * @param keyType
- * @param valueType
- * @param cacheType
- * @return - TableStore.
- * @throws IOException
- */
- <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
- Class<KEY> keyType, Class<VALUE> valueType,
- TableCache.CacheType cacheType) throws IOException;
-
/**
* Gets table store with implict key/value conversion.
*
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index b2ecc2faed..491f8ff541 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -61,7 +61,6 @@ public class RDBStore implements DBStore {
LoggerFactory.getLogger(RDBStore.class);
private final RocksDatabase db;
private final File dbLocation;
- private final CodecRegistry codecRegistry;
private RocksDBStoreMetrics metrics;
private final RDBCheckpointManager checkPointManager;
private final String checkpointsParentDir;
@@ -90,7 +89,6 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics stati
Preconditions.checkNotNull(families);
Preconditions.checkArgument(!families.isEmpty());
this.maxDbUpdatesSizeThreshold = maxDbUpdatesSizeThreshold;
- codecRegistry = registry;
dbLocation = dbFile;
this.dbOptions = dbOptions;
this.statistics = statistics;
@@ -297,26 +295,12 @@ public RDBTable getTable(String name) throws IOException {
return new RDBTable(this.db, handle, rdbMetrics);
}
- @Override
- public <K, V> TypedTable<K, V> getTable(String name,
- Class<K> keyType, Class<V> valueType) throws IOException {
- return new TypedTable<>(getTable(name), codecRegistry, keyType,
- valueType);
- }
-
@Override
public <K, V> TypedTable<K, V> getTable(
String name, Codec<K> keyCodec, Codec<V> valueCodec,
TableCache.CacheType cacheType) throws IOException {
return new TypedTable<>(getTable(name), keyCodec, valueCodec, cacheType);
}
- @Override
- public <K, V> Table<K, V> getTable(String name,
- Class<K> keyType, Class<V> valueType,
- TableCache.CacheType cacheType) throws IOException {
- return new TypedTable<>(getTable(name), codecRegistry, keyType, valueType, cacheType);
- }
-
@Override
public ArrayList<Table> listTables() {
ArrayList<Table> returnList = new ArrayList<>();
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index f39d55327a..dcf482aad2 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -68,31 +68,6 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
= new CodecBuffer.Capacity(this, BUFFER_SIZE_DEFAULT);
private final TableCache<KEY, VALUE> cache;
- /**
- * The same as this(rawTable, codecRegistry, keyType, valueType,
- * CacheType.PARTIAL_CACHE).
- */
- TypedTable(RDBTable rawTable, CodecRegistry codecRegistry, Class<KEY> keyType, Class<VALUE> valueType)
- throws IOException {
- this(rawTable, codecRegistry, keyType, valueType, CacheType.PARTIAL_CACHE);
- }
-
- /**
- * Create an TypedTable from the raw table with specified cache type.
- *
- * @param rawTable The underlying (untyped) table in RocksDB.
- * @param codecRegistry To look up codecs.
- * @param keyType The key type.
- * @param valueType The value type.
- * @param cacheType How to cache the entries?
- * @throws IOException if failed to iterate the raw table.
- */
- TypedTable(RDBTable rawTable, CodecRegistry codecRegistry, Class<KEY> keyType, Class<VALUE> valueType,
- CacheType cacheType) throws IOException {
- this(rawTable, codecRegistry.getCodecFromClass(keyType), codecRegistry.getCodecFromClass(valueType),
- cacheType);
- }
-
/**
* Create an TypedTable from the raw table with specified cache type.
*
@@ -102,8 +77,7 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
* @param cacheType How to cache the entries?
* @throws IOException
*/
- public TypedTable(
- RDBTable rawTable, Codec<KEY> keyCodec, Codec<VALUE> valueCodec, CacheType cacheType) throws IOException {
+ TypedTable(RDBTable rawTable, Codec<KEY> keyCodec, Codec<VALUE> valueCodec, CacheType cacheType) throws IOException {
this.rawTable = Objects.requireNonNull(rawTable, "rawTable==null");
this.keyCodec = Objects.requireNonNull(keyCodec, "keyCodec == null");
this.valueCodec = Objects.requireNonNull(valueCodec, "valueCodec == null");
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index fd593016d1..065e8728e7 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -42,7 +42,7 @@
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdds.utils.db.cache.TableCache;
+import org.apache.hadoop.hdds.utils.db.cache.TableCache.CacheType;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
import org.junit.jupiter.api.AfterEach;
@@ -309,13 +309,13 @@ public void batchDelete() throws Exception {
@Test
public void putGetTypedTableCodec() throws Exception {
- try (Table<String, String> testTable = rdbStore.getTable("Ten", String.class, String.class)) {
+ try (Table<String, String> testTable = rdbStore.getTable("Ten", StringCodec.get(), StringCodec.get())) {
testTable.put("test1", "123");
assertFalse(testTable.isEmpty());
assertEquals("123", testTable.get("test1"));
}
try (Table<String, ByteString> testTable = rdbStore.getTable("Ten",
- StringCodec.get(), ByteStringCodec.get(), TableCache.CacheType.NO_CACHE)) {
+ StringCodec.get(), ByteStringCodec.get(), CacheType.NO_CACHE)) {
assertEquals("123", testTable.get("test1").toStringUtf8());
}
}
@@ -407,8 +407,7 @@ public void testGetByteBuffer() throws Exception {
final String tableName = families.get(0);
try (RDBTable testTable = rdbStore.getTable(tableName)) {
final TypedTable<String, String> typedTable = new TypedTable<>(
- testTable, CodecRegistry.newBuilder().build(),
- String.class, String.class);
+ testTable, StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE);
for (int i = 0; i < 20; i++) {
final int valueSize = TypedTable.BUFFER_SIZE_DEFAULT * i / 4;
@@ -594,7 +593,7 @@ public void testStringPrefixedIterator() throws Exception {
final List<Map<String, String>> data = generateKVs(prefixes, keyCount);
try (TypedTable<String, String> table = rdbStore.getTable(
- "PrefixFirst", String.class, String.class)) {
+ "PrefixFirst", StringCodec.get(), StringCodec.get())) {
populateTable(table, data);
for (String prefix : prefixes) {
assertIterator(keyCount, prefix, table);
@@ -633,7 +632,7 @@ static void assertIterator(int expectedCount, String prefix,
@Test
public void testStringPrefixedIteratorCloseDb() throws Exception {
try (Table<String, String> testTable = rdbStore.getTable(
- "PrefixFirst", String.class, String.class)) {
+ "PrefixFirst", StringCodec.get(), StringCodec.get())) {
// iterator should seek to right pos in the middle
rdbStore.close();
assertThrows(IOException.class, () -> testTable.iterator("abc"));
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
index 8fb7108698..58b646c10b 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
@@ -44,6 +44,7 @@
import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.hdds.utils.db.cache.TableCache.CacheType;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
import org.apache.ozone.test.GenericTestUtils;
@@ -69,7 +70,6 @@ public class TestTypedRDBTableStore {
"Ninth", "Ten");
private RDBStore rdbStore = null;
private ManagedDBOptions options = null;
- private CodecRegistry codecRegistry;
@BeforeEach
public void setUp(@TempDir File tempDir) throws Exception {
@@ -91,9 +91,6 @@ public void setUp(@TempDir File tempDir) throws Exception {
}
rdbStore = TestRDBStore.newRDBStore(tempDir, options, configSet,
MAX_DB_UPDATES_SIZE_THRESHOLD);
-
- codecRegistry = CodecRegistry.newBuilder().build();
-
}
@AfterEach
@@ -123,10 +120,9 @@ public void putGetAndEmpty() throws Exception {
private Table<String, String> createTypedTable(String name)
throws IOException {
- return new TypedTable<String, String>(
+ return new TypedTable<>(
rdbStore.getTable(name),
- codecRegistry,
- String.class, String.class);
+ StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE);
}
@Test
@@ -253,7 +249,7 @@ public void testIteratorOnException() throws Exception {
when(rdbTable.iterator((CodecBuffer) null))
.thenThrow(new IOException());
try (Table<String, String> testTable = new TypedTable<>(rdbTable,
- codecRegistry, String.class, String.class)) {
+ StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE)) {
assertThrows(IOException.class, testTable::iterator);
}
}
@@ -411,8 +407,7 @@ public void testCountEstimatedRowsInTable() throws Exception {
public void testByteArrayTypedTable() throws Exception {
try (Table<byte[], byte[]> testTable = new TypedTable<>(
rdbStore.getTable("Ten"),
- codecRegistry,
- byte[].class, byte[].class)) {
+ ByteArrayCodec.get(), ByteArrayCodec.get(), CacheType.PARTIAL_CACHE)) {
byte[] key = new byte[] {1, 2, 3};
byte[] value = new byte[] {4, 5, 6};
testTable.put(key, value);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
index 671e7c1f9e..a9db3657c4 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.RDBStore;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.cache.TableCache;
@@ -112,7 +113,7 @@ private void initializeNewRdbStore(File dbFile) throws IOException {
@Override
public Table<String, KeyEntityInfoProtoWrapper> getKeyTableLite(BucketLayout bucketLayout) throws IOException {
String tableName = bucketLayout.isFileSystemOptimized() ? FILE_TABLE : KEY_TABLE;
- return getStore().getTable(tableName, String.class, KeyEntityInfoProtoWrapper.class);
+ return getStore().getTable(tableName, StringCodec.get(), KeyEntityInfoProtoWrapper.getCodec());
}
@Override
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
index 63a1484543..4f94250c02 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.ozone.freon.FreonSubcommand;
import org.apache.hadoop.ozone.om.OMStorage;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo.Builder;
@@ -111,9 +112,7 @@ public Void call() throws Exception {
// initialization: create one bucket and volume in OM.
writeOmBucketVolume();
- omKeyTable = omDb.getTable(OmMetadataManagerImpl.KEY_TABLE, String.class,
- OmKeyInfo.class);
-
+ omKeyTable = OMDBDefinition.KEY_TABLE.getTable(omDb);
timer = getMetrics().timer("om-generator");
runTests(this::writeOmKeys);
@@ -142,9 +141,7 @@ public void writeOmKeys(long index) throws Exception {
private void writeOmBucketVolume() throws IOException {
- Table<String, OmVolumeArgs> volTable =
- omDb.getTable(OmMetadataManagerImpl.VOLUME_TABLE, String.class,
- OmVolumeArgs.class);
+ final Table<String, OmVolumeArgs> volTable = OMDBDefinition.VOLUME_TABLE.getTable(omDb);
String admin = getUserId();
String owner = getUserId();
@@ -166,9 +163,7 @@ private void writeOmBucketVolume() throws IOException {
volTable.put("/" + volumeName, omVolumeArgs);
- final Table<String, PersistedUserVolumeInfo> userTable =
- omDb.getTable(OmMetadataManagerImpl.USER_TABLE, String.class,
- PersistedUserVolumeInfo.class);
+ final Table<String, PersistedUserVolumeInfo> userTable = OMDBDefinition.USER_TABLE.getTable(omDb);
PersistedUserVolumeInfo currentUserVolumeInfo =
userTable.get(getUserId());
@@ -189,9 +184,7 @@ private void writeOmBucketVolume() throws IOException {
userTable.put(getUserId(), currentUserVolumeInfo);
- Table<String, OmBucketInfo> bucketTable =
- omDb.getTable(OmMetadataManagerImpl.BUCKET_TABLE, String.class,
- OmBucketInfo.class);
+ final Table<String, OmBucketInfo> bucketTable = OMDBDefinition.BUCKET_TABLE.getTable(omDb);
OmBucketInfo omBucketInfo = new OmBucketInfo.Builder()
.setBucketName(bucketName)
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
index 74ae33ae95..f85ec7099a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
@@ -30,12 +30,16 @@
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.ByteArrayCodec;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.hdds.utils.db.TypedTable;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
@@ -79,6 +83,7 @@
public class FSORepairTool extends RepairTool {
private static final Logger LOG =
LoggerFactory.getLogger(FSORepairTool.class);
private static final String REACHABLE_TABLE = "reachable";
+ private static final byte[] EMPTY_BYTE_ARRAY = {};
@CommandLine.Option(names = {"--db"},
required = true,
@@ -105,6 +110,7 @@ public void execute() throws Exception {
Impl repairTool = new Impl();
repairTool.run();
} catch (Exception ex) {
+ LOG.error("FSO repair failed", ex);
throw new IllegalArgumentException("FSO repair failed: " +
ex.getMessage());
}
@@ -124,6 +130,7 @@ private class Impl {
private final Table<String, RepeatedOmKeyInfo> deletedTable;
private final Table<String, SnapshotInfo> snapshotInfoTable;
private DBStore reachableDB;
+ private TypedTable<String, byte[]> reachableTable;
private final ReportStatistics reachableStats;
private final ReportStatistics unreachableStats;
private final ReportStatistics unreferencedStats;
@@ -134,27 +141,13 @@ private class Impl {
this.unreferencedStats = new ReportStatistics(0, 0, 0);
this.store = getStoreFromPath(omDBPath);
- volumeTable = store.getTable(OmMetadataManagerImpl.VOLUME_TABLE,
- String.class,
- OmVolumeArgs.class);
- bucketTable = store.getTable(OmMetadataManagerImpl.BUCKET_TABLE,
- String.class,
- OmBucketInfo.class);
- directoryTable = store.getTable(OmMetadataManagerImpl.DIRECTORY_TABLE,
- String.class,
- OmDirectoryInfo.class);
- fileTable = store.getTable(OmMetadataManagerImpl.FILE_TABLE,
- String.class,
- OmKeyInfo.class);
- deletedDirectoryTable = store.getTable(OmMetadataManagerImpl.DELETED_DIR_TABLE,
- String.class,
- OmKeyInfo.class);
- deletedTable = store.getTable(OmMetadataManagerImpl.DELETED_TABLE,
- String.class,
- RepeatedOmKeyInfo.class);
- snapshotInfoTable = store.getTable(OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE,
- String.class,
- SnapshotInfo.class);
+ this.volumeTable = OMDBDefinition.VOLUME_TABLE.getTable(store);
+ this.bucketTable = OMDBDefinition.BUCKET_TABLE.getTable(store);
+ this.directoryTable = OMDBDefinition.DIRECTORY_TABLE.getTable(store);
+ this.fileTable = OMDBDefinition.FILE_TABLE.getTable(store);
+ this.deletedDirectoryTable = OMDBDefinition.DELETED_DIR_TABLE.getTable(store);
+ this.deletedTable = OMDBDefinition.DELETED_TABLE.getTable(store);
+ this.snapshotInfoTable = OMDBDefinition.SNAPSHOT_INFO_TABLE.getTable(store);
}
public Report run() throws Exception {
@@ -461,7 +454,7 @@ private Collection<String> getChildDirectoriesAndMarkAsReachable(OmVolumeArgs vo
private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket,
WithObjectID object) throws IOException {
String reachableKey = buildReachableKey(volume, bucket, object);
// No value is needed for this table.
- reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).put(reachableKey, new byte[]{});
+ reachableTable.put(reachableKey, EMPTY_BYTE_ARRAY);
}
/**
@@ -471,7 +464,7 @@ private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket, WithObj
protected boolean isReachable(String fileOrDirKey) throws IOException {
String reachableParentKey = buildReachableParentKey(fileOrDirKey);
- return reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).get(reachableParentKey) != null;
+ return reachableTable.get(reachableParentKey) != null;
}
private void openReachableDB() throws IOException {
@@ -488,6 +481,7 @@ private void openReachableDB() throws IOException {
.setPath(reachableDBFile.getParentFile().toPath())
.addTable(REACHABLE_TABLE)
.build();
+ reachableTable = reachableDB.getTable(REACHABLE_TABLE, StringCodec.get(), ByteArrayCodec.get());
}
private void closeReachableDB() throws IOException {
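(One more note on the FSORepairTool hunks above: the tool now resolves the "reachable" marker table once in openReachableDB() and caches it in the new reachableTable field, rather than calling reachableDB.getTable(...) in every addReachableEntry()/isReachable() call, and the shared EMPTY_BYTE_ARRAY constant avoids allocating a fresh empty array per marker entry.)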
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]