This is an automated email from the ASF dual-hosted git repository.
szetszwo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new a7fc290c200 HDDS-13236. Change Table methods not to throw IOException. (#8645)
a7fc290c200 is described below
commit a7fc290c200216262a185d8c461d954e80699132
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Tue Jun 17 14:12:35 2025 -0700
HDDS-13236. Change Table methods not to throw IOException. (#8645)
---
.../ozone/container/metadata/DatanodeTable.java | 39 +++++-----
.../metadata/SchemaOneDeletedBlocksTable.java | 29 ++++----
.../hadoop/hdds/utils/db/RDBBatchOperation.java | 24 +++---
.../org/apache/hadoop/hdds/utils/db/RDBStore.java | 7 +-
.../hdds/utils/db/RDBStoreAbstractIterator.java | 5 +-
.../hdds/utils/db/RDBStoreByteArrayIterator.java | 3 +-
.../hdds/utils/db/RDBStoreCodecBufferIterator.java | 3 +-
.../org/apache/hadoop/hdds/utils/db/RDBTable.java | 63 +++++++---------
.../org/apache/hadoop/hdds/utils/db/Table.java | 60 ++++++---------
.../apache/hadoop/hdds/utils/db/TableIterator.java | 9 +--
.../apache/hadoop/hdds/utils/db/TypedTable.java | 86 ++++++++++------------
.../hadoop/hdds/utils/MapBackedTableIterator.java | 7 +-
.../hadoop/hdds/utils/db/InMemoryTestTable.java | 4 +-
hadoop-hdds/server-scm/pom.xml | 4 +
.../scm/block/DeletedBlockLogStateManagerImpl.java | 8 +-
15 files changed, 152 insertions(+), 199 deletions(-)
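
In short: methods on Table and its implementations now declare the narrower checked
exceptions RocksDatabaseException (RocksDB-level failures) and CodecException
(key/value encoding failures) instead of the broad java.io.IOException. A minimal
caller-side sketch of the new contract (hypothetical code, not part of this patch;
it only assumes the two exception types used throughout the diff below):

    import org.apache.hadoop.hdds.utils.db.CodecException;
    import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
    import org.apache.hadoop.hdds.utils.db.Table;

    final class TableGetSketch {
      // Hypothetical helper: each failure mode can now be handled separately.
      static <K, V> V getOrFail(Table<K, V> table, K key) {
        try {
          return table.get(key); // declares RocksDatabaseException, CodecException
        } catch (RocksDatabaseException e) {
          // underlying RocksDB failure (I/O, missing column family, ...)
          throw new IllegalStateException("DB read failed for key " + key, e);
        } catch (CodecException e) {
          // key or value (de)serialization failure
          throw new IllegalStateException("Codec failure for key " + key, e);
        }
      }
    }
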
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java
index 5c405508676..bb60f27206b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java
@@ -18,10 +18,11 @@
package org.apache.hadoop.ozone.container.metadata;
import java.io.File;
-import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.CodecException;
+import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
import org.apache.hadoop.hdds.utils.db.Table;
/**
@@ -42,34 +43,32 @@ public DatanodeTable(Table<KEY, VALUE> table) {
}
@Override
- public void put(KEY key, VALUE value) throws IOException {
+ public void put(KEY key, VALUE value) throws RocksDatabaseException, CodecException {
table.put(key, value);
}
@Override
- public void putWithBatch(BatchOperation batch, KEY key,
- VALUE value) throws IOException {
+ public void putWithBatch(BatchOperation batch, KEY key, VALUE value) throws RocksDatabaseException, CodecException {
table.putWithBatch(batch, key, value);
}
@Override
- public boolean isEmpty() throws IOException {
+ public boolean isEmpty() throws RocksDatabaseException {
return table.isEmpty();
}
@Override
- public void delete(KEY key) throws IOException {
+ public void delete(KEY key) throws RocksDatabaseException, CodecException {
table.delete(key);
}
@Override
- public void deleteRange(KEY beginKey, KEY endKey) throws IOException {
+ public void deleteRange(KEY beginKey, KEY endKey) throws RocksDatabaseException, CodecException {
table.deleteRange(beginKey, endKey);
}
@Override
- public void deleteWithBatch(BatchOperation batch, KEY key)
- throws IOException {
+ public void deleteWithBatch(BatchOperation batch, KEY key) throws CodecException {
table.deleteWithBatch(batch, key);
}
@@ -86,27 +85,27 @@ public String getName() {
}
@Override
- public long getEstimatedKeyCount() throws IOException {
+ public long getEstimatedKeyCount() throws RocksDatabaseException {
return table.getEstimatedKeyCount();
}
@Override
- public boolean isExist(KEY key) throws IOException {
+ public boolean isExist(KEY key) throws RocksDatabaseException, CodecException {
return table.isExist(key);
}
@Override
- public VALUE get(KEY key) throws IOException {
+ public VALUE get(KEY key) throws RocksDatabaseException, CodecException {
return table.get(key);
}
@Override
- public VALUE getIfExist(KEY key) throws IOException {
+ public VALUE getIfExist(KEY key) throws RocksDatabaseException, CodecException {
return table.getIfExist(key);
}
@Override
- public VALUE getReadCopy(KEY key) throws IOException {
+ public VALUE getReadCopy(KEY key) throws RocksDatabaseException, CodecException {
return table.getReadCopy(key);
}
@@ -114,7 +113,7 @@ public VALUE getReadCopy(KEY key) throws IOException {
public List<KeyValue<KEY, VALUE>> getRangeKVs(
KEY startKey, int count, KEY prefix,
MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException {
+ throws RocksDatabaseException, CodecException {
return table.getRangeKVs(startKey, count, prefix, filters);
}
@@ -122,24 +121,22 @@ public List<KeyValue<KEY, VALUE>> getRangeKVs(
public List<KeyValue<KEY, VALUE>> getSequentialRangeKVs(
KEY startKey, int count, KEY prefix,
MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException {
+ throws RocksDatabaseException, CodecException {
return table.getSequentialRangeKVs(startKey, count, prefix, filters);
}
@Override
- public void deleteBatchWithPrefix(BatchOperation batch, KEY prefix)
- throws IOException {
+ public void deleteBatchWithPrefix(BatchOperation batch, KEY prefix) throws RocksDatabaseException, CodecException {
table.deleteBatchWithPrefix(batch, prefix);
}
@Override
- public void dumpToFileWithPrefix(File externalFile, KEY prefix)
- throws IOException {
+ public void dumpToFileWithPrefix(File externalFile, KEY prefix) throws RocksDatabaseException, CodecException {
table.dumpToFileWithPrefix(externalFile, prefix);
}
@Override
- public void loadFromFile(File externalFile) throws IOException {
+ public void loadFromFile(File externalFile) throws RocksDatabaseException {
table.loadFromFile(externalFile);
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java
index fd0b6f85640..1e55e302fb1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java
@@ -17,11 +17,12 @@
package org.apache.hadoop.ozone.container.metadata;
-import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.CodecException;
+import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
@@ -51,50 +52,48 @@ public SchemaOneDeletedBlocksTable(Table<String, ChunkInfoList> table) {
}
@Override
- public void put(String key, ChunkInfoList value) throws IOException {
+ public void put(String key, ChunkInfoList value) throws RocksDatabaseException, CodecException {
super.put(prefix(key), value);
}
@Override
- public void putWithBatch(BatchOperation batch, String key,
- ChunkInfoList value)
- throws IOException {
+ public void putWithBatch(BatchOperation batch, String key, ChunkInfoList value)
+ throws RocksDatabaseException, CodecException {
super.putWithBatch(batch, prefix(key), value);
}
@Override
- public void delete(String key) throws IOException {
+ public void delete(String key) throws RocksDatabaseException, CodecException {
super.delete(prefix(key));
}
@Override
- public void deleteWithBatch(BatchOperation batch, String key)
- throws IOException {
+ public void deleteWithBatch(BatchOperation batch, String key) throws CodecException {
super.deleteWithBatch(batch, prefix(key));
}
@Override
- public void deleteRange(String beginKey, String endKey) throws IOException {
+ public void deleteRange(String beginKey, String endKey) throws RocksDatabaseException, CodecException {
super.deleteRange(prefix(beginKey), prefix(endKey));
}
@Override
- public boolean isExist(String key) throws IOException {
+ public boolean isExist(String key) throws RocksDatabaseException, CodecException {
return super.isExist(prefix(key));
}
@Override
- public ChunkInfoList get(String key) throws IOException {
+ public ChunkInfoList get(String key) throws RocksDatabaseException, CodecException {
return super.get(prefix(key));
}
@Override
- public ChunkInfoList getIfExist(String key) throws IOException {
+ public ChunkInfoList getIfExist(String key) throws RocksDatabaseException, CodecException {
return super.getIfExist(prefix(key));
}
@Override
- public ChunkInfoList getReadCopy(String key) throws IOException {
+ public ChunkInfoList getReadCopy(String key) throws RocksDatabaseException, CodecException {
return super.getReadCopy(prefix(key));
}
@@ -102,7 +101,7 @@ public ChunkInfoList getReadCopy(String key) throws IOException {
public List<KeyValue<String, ChunkInfoList>> getRangeKVs(
String startKey, int count, String prefix,
MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException {
+ throws RocksDatabaseException, CodecException {
// Deleted blocks will always have the #deleted# key prefix and nothing
// else in this schema version. Ignore any user passed prefixes that could
@@ -115,7 +114,7 @@ public List<KeyValue<String, ChunkInfoList>> getRangeKVs(
public List<KeyValue<String, ChunkInfoList>> getSequentialRangeKVs(
String startKey, int count, String prefix,
MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException {
+ throws RocksDatabaseException, CodecException {
// Deleted blocks will always have the #deleted# key prefix and nothing
// else in this schema version. Ignore any user passed prefixes that could
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
index 93e50a2d18c..8b9fa7295c7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
@@ -20,8 +20,6 @@
import static org.apache.hadoop.hdds.StringUtils.bytes2String;
import com.google.common.base.Preconditions;
-import java.io.Closeable;
-import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
@@ -32,6 +30,7 @@
import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch;
import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions;
import org.apache.ratis.util.TraditionalBinaryPrefix;
+import org.apache.ratis.util.UncheckedAutoCloseable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -156,7 +155,7 @@ private class FamilyCache {
}
/** Prepare batch write for the entire family. */
- void prepareBatchWrite() throws IOException {
+ void prepareBatchWrite() throws RocksDatabaseException {
Preconditions.checkState(!isCommit, "%s is already committed.", this);
isCommit = true;
for (Map.Entry<Bytes, Object> op : ops.entrySet()) {
@@ -289,7 +288,7 @@ void delete(ColumnFamily family, byte[] key) {
}
/** Prepare batch write for the entire cache. */
- Closeable prepareBatchWrite() throws IOException {
+ UncheckedAutoCloseable prepareBatchWrite() throws RocksDatabaseException {
for (Map.Entry<String, FamilyCache> e : name2cache.entrySet()) {
e.getValue().prepareBatchWrite();
}
@@ -341,19 +340,18 @@ public String toString() {
return name;
}
- public void commit(RocksDatabase db) throws IOException {
+ public void commit(RocksDatabase db) throws RocksDatabaseException {
debug(() -> String.format("%s: commit %s",
name, opCache.getCommitString()));
- try (Closeable ignored = opCache.prepareBatchWrite()) {
+ try (UncheckedAutoCloseable ignored = opCache.prepareBatchWrite()) {
db.batchWrite(writeBatch);
}
}
- public void commit(RocksDatabase db, ManagedWriteOptions writeOptions)
- throws IOException {
+ public void commit(RocksDatabase db, ManagedWriteOptions writeOptions) throws RocksDatabaseException {
debug(() -> String.format("%s: commit-with-writeOptions %s",
name, opCache.getCommitString()));
- try (Closeable ignored = opCache.prepareBatchWrite()) {
+ try (UncheckedAutoCloseable ignored = opCache.prepareBatchWrite()) {
db.batchWrite(writeBatch, writeOptions);
}
}
@@ -365,17 +363,15 @@ public void close() {
opCache.clear();
}
- public void delete(ColumnFamily family, byte[] key) throws IOException {
+ public void delete(ColumnFamily family, byte[] key) {
opCache.delete(family, key);
}
- public void put(ColumnFamily family, CodecBuffer key, CodecBuffer value)
- throws IOException {
+ public void put(ColumnFamily family, CodecBuffer key, CodecBuffer value) {
opCache.put(family, key, value);
}
- public void put(ColumnFamily family, byte[] key, byte[] value)
- throws IOException {
+ public void put(ColumnFamily family, byte[] key, byte[] value) {
opCache.put(family, key, value);
}
}
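
A note on the Closeable to UncheckedAutoCloseable switch above: Closeable.close()
declares IOException, so a try-with-resources over it would re-introduce the very
exception this change removes, while UncheckedAutoCloseable.close() declares no
checked exception. A hedged sketch of the pattern (hypothetical code, assuming
org.apache.ratis.util.UncheckedAutoCloseable is a single-method interface usable
as a lambda target):

    import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
    import org.apache.ratis.util.UncheckedAutoCloseable;

    final class UncheckedCloseSketch {
      static void commitSketch() throws RocksDatabaseException {
        // close() declares no checked exception, so only the body's
        // RocksDatabaseException can propagate out of this block.
        try (UncheckedAutoCloseable ignored = () -> { /* release cached buffers */ }) {
          // db.batchWrite(writeBatch); // the only checked-exception source here
        }
      }
    }
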
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 3539a906117..6fafee9c2bb 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -303,17 +303,18 @@ public void commitBatchOperation(BatchOperation operation)
}
@Override
- public RDBTable getTable(String name) throws IOException {
+ public RDBTable getTable(String name) throws RocksDatabaseException {
final ColumnFamily handle = db.getColumnFamily(name);
if (handle == null) {
- throw new IOException("No such table in this DB. TableName : " + name);
+ throw new RocksDatabaseException("No such table in this DB. TableName : " + name);
}
return new RDBTable(this.db, handle, rdbMetrics);
}
@Override
public <K, V> TypedTable<K, V> getTable(
- String name, Codec<K> keyCodec, Codec<V> valueCodec, TableCache.CacheType cacheType) throws IOException {
+ String name, Codec<K> keyCodec, Codec<V> valueCodec, TableCache.CacheType cacheType)
+ throws RocksDatabaseException, CodecException {
return new TypedTable<>(getTable(name), keyCodec, valueCodec, cacheType);
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreAbstractIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreAbstractIterator.java
index 1e36d6bd073..b45d6c323fb 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreAbstractIterator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreAbstractIterator.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.utils.db;
-import java.io.IOException;
import java.util.NoSuchElementException;
import java.util.function.Consumer;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
@@ -59,7 +58,7 @@ abstract class RDBStoreAbstractIterator<RAW>
abstract void seek0(RAW key);
/** Delete the given key. */
- abstract void delete(RAW key) throws IOException;
+ abstract void delete(RAW key) throws RocksDatabaseException;
/** Does the given key start with the prefix? */
abstract boolean startsWithPrefix(RAW key);
@@ -136,7 +135,7 @@ public final Table.KeyValue<RAW, RAW> seek(RAW key) {
}
@Override
- public final void removeFromDB() throws IOException {
+ public final void removeFromDB() throws RocksDatabaseException, CodecException {
if (rocksDBTable == null) {
throw new UnsupportedOperationException("remove");
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java
index 10651fa7d25..6c0507ee59d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.utils.db;
-import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
@@ -56,7 +55,7 @@ void seek0(byte[] key) {
}
@Override
- void delete(byte[] key) throws IOException {
+ void delete(byte[] key) throws RocksDatabaseException {
getRocksDBTable().delete(key);
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
index 96ae01b7217..59de79dd45c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.utils.db;
-import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
@@ -69,7 +68,7 @@ void seek0(CodecBuffer key) {
}
@Override
- void delete(CodecBuffer key) throws IOException {
+ void delete(CodecBuffer key) throws RocksDatabaseException {
assertOpen();
getRocksDBTable().delete(key.asReadOnlyByteBuffer());
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
index d463d37430e..361edb8032b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdds.utils.db;
import java.io.File;
-import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
@@ -63,17 +62,16 @@ public ColumnFamily getColumnFamily() {
return family;
}
- void put(ByteBuffer key, ByteBuffer value) throws IOException {
+ void put(ByteBuffer key, ByteBuffer value) throws RocksDatabaseException {
db.put(family, key, value);
}
@Override
- public void put(byte[] key, byte[] value) throws IOException {
+ public void put(byte[] key, byte[] value) throws RocksDatabaseException {
db.put(family, key, value);
}
- void putWithBatch(BatchOperation batch, CodecBuffer key, CodecBuffer value)
- throws IOException {
+ void putWithBatch(BatchOperation batch, CodecBuffer key, CodecBuffer value) {
if (batch instanceof RDBBatchOperation) {
((RDBBatchOperation) batch).put(family, key, value);
} else {
@@ -83,8 +81,7 @@ void putWithBatch(BatchOperation batch, CodecBuffer key, CodecBuffer value)
}
@Override
- public void putWithBatch(BatchOperation batch, byte[] key, byte[] value)
- throws IOException {
+ public void putWithBatch(BatchOperation batch, byte[] key, byte[] value) {
if (batch instanceof RDBBatchOperation) {
((RDBBatchOperation) batch).put(family, key, value);
} else {
@@ -93,7 +90,7 @@ public void putWithBatch(BatchOperation batch, byte[] key, byte[] value)
}
@Override
- public boolean isEmpty() throws IOException {
+ public boolean isEmpty() throws RocksDatabaseException {
try (KeyValueIterator<byte[], byte[]> keyIter = iterator((byte[]) null,
KeyValueIterator.Type.NEITHER)) {
keyIter.seekToFirst();
return !keyIter.hasNext();
@@ -101,7 +98,7 @@ public boolean isEmpty() throws IOException {
}
@Override
- public boolean isExist(byte[] key) throws IOException {
+ public boolean isExist(byte[] key) throws RocksDatabaseException {
rdbMetrics.incNumDBKeyMayExistChecks();
final Supplier<byte[]> holder = db.keyMayExist(family, key);
if (holder == null) {
@@ -121,12 +118,12 @@ public boolean isExist(byte[] key) throws IOException {
}
@Override
- public byte[] get(byte[] key) throws IOException {
+ public byte[] get(byte[] key) throws RocksDatabaseException {
rdbMetrics.incNumDBKeyGets();
return db.get(family, key);
}
- Integer get(ByteBuffer key, ByteBuffer outValue) throws IOException {
+ Integer get(ByteBuffer key, ByteBuffer outValue) throws RocksDatabaseException {
return db.get(family, key, outValue);
}
@@ -136,15 +133,14 @@ Integer get(ByteBuffer key, ByteBuffer outValue) throws IOException {
*
* @param bytes metadata key
* @return value in byte array or null if the key is not found.
- * @throws IOException on Failure
*/
@Override
- public byte[] getSkipCache(byte[] bytes) throws IOException {
+ public byte[] getSkipCache(byte[] bytes) throws RocksDatabaseException {
return get(bytes);
}
@Override
- public byte[] getIfExist(byte[] key) throws IOException {
+ public byte[] getIfExist(byte[] key) throws RocksDatabaseException {
rdbMetrics.incNumDBKeyGetIfExistChecks();
final Supplier<byte[]> value = db.keyMayExist(family, key);
if (value == null) {
@@ -163,7 +159,7 @@ public byte[] getIfExist(byte[] key) throws IOException {
return val;
}
- Integer getIfExist(ByteBuffer key, ByteBuffer outValue) throws IOException {
+ Integer getIfExist(ByteBuffer key, ByteBuffer outValue) throws RocksDatabaseException {
rdbMetrics.incNumDBKeyGetIfExistChecks();
final Supplier<Integer> value = db.keyMayExist(
family, key, outValue.duplicate());
@@ -185,22 +181,21 @@ Integer getIfExist(ByteBuffer key, ByteBuffer outValue) throws IOException {
}
@Override
- public void delete(byte[] key) throws IOException {
+ public void delete(byte[] key) throws RocksDatabaseException {
db.delete(family, key);
}
- public void delete(ByteBuffer key) throws IOException {
+ public void delete(ByteBuffer key) throws RocksDatabaseException {
db.delete(family, key);
}
@Override
- public void deleteRange(byte[] beginKey, byte[] endKey) throws IOException {
+ public void deleteRange(byte[] beginKey, byte[] endKey) throws RocksDatabaseException {
db.deleteRange(family, beginKey, endKey);
}
@Override
- public void deleteWithBatch(BatchOperation batch, byte[] key)
- throws IOException {
+ public void deleteWithBatch(BatchOperation batch, byte[] key) {
if (batch instanceof RDBBatchOperation) {
((RDBBatchOperation) batch).delete(family, key);
} else {
@@ -217,7 +212,7 @@ public KeyValueIterator<byte[], byte[]> iterator(byte[] prefix, KeyValueIterator
}
KeyValueIterator<CodecBuffer, CodecBuffer> iterator(
- CodecBuffer prefix, KeyValueIterator.Type type) throws IOException {
+ CodecBuffer prefix, KeyValueIterator.Type type) throws RocksDatabaseException {
return new RDBStoreCodecBufferIterator(db.newIterator(family, false),
this, prefix, type);
}
@@ -228,34 +223,30 @@ public String getName() {
}
@Override
- public void close() throws Exception {
+ public void close() {
// Nothing do for a Column Family.
}
@Override
- public long getEstimatedKeyCount() throws IOException {
+ public long getEstimatedKeyCount() throws RocksDatabaseException {
return db.estimateNumKeys(family);
}
@Override
- public List<KeyValue<byte[], byte[]>> getRangeKVs(byte[] startKey,
- int count, byte[] prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException {
+ public List<KeyValue<byte[], byte[]>> getRangeKVs(byte[] startKey, int count, byte[] prefix,
+ MetadataKeyFilters.MetadataKeyFilter... filters) throws RocksDatabaseException, CodecException {
return getRangeKVs(startKey, count, false, prefix, filters);
}
@Override
- public List<KeyValue<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
- int count, byte[] prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException {
+ public List<KeyValue<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey, int count, byte[] prefix,
+ MetadataKeyFilters.MetadataKeyFilter... filters) throws RocksDatabaseException, CodecException {
return getRangeKVs(startKey, count, true, prefix, filters);
}
@Override
public void deleteBatchWithPrefix(BatchOperation batch, byte[] prefix)
- throws IOException {
+ throws RocksDatabaseException, CodecException {
try (KeyValueIterator<byte[], byte[]> iter = iterator(prefix)) {
while (iter.hasNext()) {
deleteWithBatch(batch, iter.next().getKey());
@@ -265,7 +256,7 @@ public void deleteBatchWithPrefix(BatchOperation batch, byte[] prefix)
@Override
public void dumpToFileWithPrefix(File externalFile, byte[] prefix)
- throws IOException {
+ throws RocksDatabaseException, CodecException {
try (KeyValueIterator<byte[], byte[]> iter = iterator(prefix);
RDBSstFileWriter fileWriter = new RDBSstFileWriter(externalFile)) {
while (iter.hasNext()) {
@@ -280,10 +271,8 @@ public void loadFromFile(File externalFile) throws RocksDatabaseException {
RDBSstFileLoader.load(db, family, externalFile);
}
- private List<KeyValue<byte[], byte[]>> getRangeKVs(byte[] startKey,
- int count, boolean sequential, byte[] prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException {
+ private List<KeyValue<byte[], byte[]>> getRangeKVs(byte[] startKey, int count, boolean sequential, byte[] prefix,
+ MetadataKeyFilters.MetadataKeyFilter... filters) throws RocksDatabaseException, CodecException {
long start = Time.monotonicNow();
if (count < 0) {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index 475e1bf0bba..d96012974fa 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdds.utils.db;
import java.io.File;
-import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -45,7 +44,7 @@ public interface Table<KEY, VALUE> extends AutoCloseable {
* @param key metadata key
* @param value metadata value
*/
- void put(KEY key, VALUE value) throws IOException;
+ void put(KEY key, VALUE value) throws RocksDatabaseException, CodecException;
/**
* Puts a key-value pair into the store as part of a batch operation.
@@ -54,14 +53,12 @@ public interface Table<KEY, VALUE> extends AutoCloseable {
* @param key metadata key
* @param value metadata value
*/
- void putWithBatch(BatchOperation batch, KEY key, VALUE value)
- throws IOException;
+ void putWithBatch(BatchOperation batch, KEY key, VALUE value) throws RocksDatabaseException, CodecException;
/**
* @return true if the metadata store is empty.
- * @throws IOException on Failure
*/
- boolean isEmpty() throws IOException;
+ boolean isEmpty() throws RocksDatabaseException;
/**
* Check if a given key exists in Metadata store.
@@ -69,9 +66,8 @@ void putWithBatch(BatchOperation batch, KEY key, VALUE value)
* A lock on the key / bucket needs to be acquired before invoking this API.
* @param key metadata key
* @return true if the metadata store contains a key.
- * @throws IOException on Failure
*/
- boolean isExist(KEY key) throws IOException;
+ boolean isExist(KEY key) throws RocksDatabaseException, CodecException;
/**
* Returns the value mapped to the given key in byte array or returns null
@@ -79,9 +75,8 @@ void putWithBatch(BatchOperation batch, KEY key, VALUE value)
*
* @param key metadata key
* @return value in byte array or null if the key is not found.
- * @throws IOException on Failure
*/
- VALUE get(KEY key) throws IOException;
+ VALUE get(KEY key) throws RocksDatabaseException, CodecException;
/**
* Skip checking cache and get the value mapped to the given key in byte
@@ -89,9 +84,8 @@ void putWithBatch(BatchOperation batch, KEY key, VALUE value)
*
* @param key metadata key
* @return value in byte array or null if the key is not found.
- * @throws IOException on Failure
*/
- default VALUE getSkipCache(KEY key) throws IOException {
+ default VALUE getSkipCache(KEY key) throws RocksDatabaseException, CodecException {
throw new NotImplementedException("getSkipCache is not implemented");
}
@@ -104,9 +98,8 @@ default VALUE getSkipCache(KEY key) throws IOException {
*
* @param key metadata key
* @return value in byte array or null if the key is not found.
- * @throws IOException on Failure
*/
- default VALUE getReadCopy(KEY key) throws IOException {
+ default VALUE getReadCopy(KEY key) throws RocksDatabaseException, CodecException {
throw new NotImplementedException("getReadCopy is not implemented");
}
@@ -123,43 +116,39 @@ default VALUE getReadCopy(KEY key) throws IOException {
*
* @param key metadata key
* @return value in byte array or null if the key is not found.
- * @throws IOException on Failure
*/
- VALUE getIfExist(KEY key) throws IOException;
+ VALUE getIfExist(KEY key) throws RocksDatabaseException, CodecException;
/**
* Deletes a key from the metadata store.
*
* @param key metadata key
- * @throws IOException on Failure
*/
- void delete(KEY key) throws IOException;
+ void delete(KEY key) throws RocksDatabaseException, CodecException;
/**
* Deletes a key from the metadata store as part of a batch operation.
*
* @param batch the batch operation
* @param key metadata key
- * @throws IOException on Failure
*/
- void deleteWithBatch(BatchOperation batch, KEY key) throws IOException;
+ void deleteWithBatch(BatchOperation batch, KEY key) throws CodecException;
/**
* Deletes a range of keys from the metadata store.
*
* @param beginKey start metadata key
* @param endKey end metadata key
- * @throws IOException on Failure
*/
- void deleteRange(KEY beginKey, KEY endKey) throws IOException;
+ void deleteRange(KEY beginKey, KEY endKey) throws RocksDatabaseException, CodecException;
/** The same as iterator(null). */
- default KeyValueIterator<KEY, VALUE> iterator() throws IOException {
+ default KeyValueIterator<KEY, VALUE> iterator() throws RocksDatabaseException, CodecException {
return iterator(null);
}
/** The same as iterator(prefix, KEY_AND_VALUE). */
- default KeyValueIterator<KEY, VALUE> iterator(KEY prefix) throws IOException {
+ default KeyValueIterator<KEY, VALUE> iterator(KEY prefix) throws RocksDatabaseException, CodecException {
return iterator(prefix, KeyValueIterator.Type.KEY_AND_VALUE);
}
@@ -171,7 +160,7 @@ default KeyValueIterator<KEY, VALUE> iterator(KEY prefix) throws IOException {
* @return an iterator.
*/
KeyValueIterator<KEY, VALUE> iterator(KEY prefix, KeyValueIterator.Type type)
- throws IOException;
+ throws RocksDatabaseException, CodecException;
/**
* Returns the Name of this Table.
@@ -182,9 +171,8 @@ KeyValueIterator<KEY, VALUE> iterator(KEY prefix, KeyValueIterator.Type type)
/**
* Returns the key count of this Table. Note the result can be inaccurate.
* @return Estimated key count of this Table
- * @throws IOException on failure
*/
- long getEstimatedKeyCount() throws IOException;
+ long getEstimatedKeyCount() throws RocksDatabaseException;
/**
* Add entry to the table cache.
@@ -237,7 +225,7 @@ default void cleanupCache(List<Long> epochs) {
/**
* Create the metrics datasource that emits table cache metrics.
*/
- default TableCacheMetrics createCacheMetrics() throws IOException {
+ default TableCacheMetrics createCacheMetrics() throws RocksDatabaseException {
throw new NotImplementedException("getCacheValue is not implemented");
}
@@ -270,13 +258,12 @@ default TableCacheMetrics createCacheMetrics() throws IOException {
* {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}.
* @return a list of entries found in the database or an empty list if the
* startKey is invalid.
- * @throws IOException if there are I/O errors.
* @throws IllegalArgumentException if count is less than 0.
*/
List<KeyValue<KEY, VALUE>> getRangeKVs(KEY startKey,
int count, KEY prefix,
MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException;
+ throws RocksDatabaseException, CodecException;
/**
* This method is very similar to {@link #getRangeKVs}, the only
@@ -292,13 +279,11 @@ List<KeyValue<KEY, VALUE>> getRangeKVs(KEY startKey,
* @param filters customized one or more
* {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}.
* @return a list of entries found in the database.
- * @throws IOException
- * @throws IllegalArgumentException
*/
List<KeyValue<KEY, VALUE>> getSequentialRangeKVs(KEY startKey,
int count, KEY prefix,
MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException;
+ throws RocksDatabaseException, CodecException;
/**
* Deletes all keys with the specified prefix from the metadata store
@@ -306,24 +291,21 @@ List<KeyValue<KEY, VALUE>> getSequentialRangeKVs(KEY startKey,
* @param batch
* @param prefix
*/
- void deleteBatchWithPrefix(BatchOperation batch, KEY prefix)
- throws IOException;
+ void deleteBatchWithPrefix(BatchOperation batch, KEY prefix) throws RocksDatabaseException, CodecException;
/**
* Dump all key value pairs with a prefix into an external file.
* @param externalFile
* @param prefix
- * @throws IOException
*/
- void dumpToFileWithPrefix(File externalFile, KEY prefix) throws IOException;
+ void dumpToFileWithPrefix(File externalFile, KEY prefix) throws RocksDatabaseException, CodecException;
/**
* Load key value pairs from an external file created by
* dumpToFileWithPrefix.
* @param externalFile
- * @throws IOException
*/
- void loadFromFile(File externalFile) throws IOException;
+ void loadFromFile(File externalFile) throws RocksDatabaseException;
/**
* Class used to represent the key and value pair of a db entry.
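
A compatibility note on the interface change above: callers elsewhere in the tree
that still catch IOException keep compiling, which suggests that both
RocksDatabaseException and CodecException extend java.io.IOException. Under that
assumption (labeled as such; not stated explicitly in this patch), legacy code like
the following hypothetical caller is unaffected:

    import java.io.IOException;
    import org.apache.hadoop.hdds.utils.db.Table;

    final class LegacyCallerSketch {
      // Hypothetical pre-existing caller that still catches IOException.
      // Assumes RocksDatabaseException and CodecException extend IOException,
      // which is what keeps a catch like this legal after the change.
      static <K, V> boolean exists(Table<K, V> table, K key) {
        try {
          return table.isExist(key);
        } catch (IOException e) {
          return false;
        }
      }
    }
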
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java
index 73dc2caabf3..85c02fa9530 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdds.utils.db;
import java.io.Closeable;
-import java.io.IOException;
import java.util.Iterator;
/**
@@ -28,6 +27,8 @@
* @param <T> The type to be iterated.
*/
public interface TableIterator<KEY, T> extends Iterator<T>, Closeable {
+ @Override
+ void close() throws RocksDatabaseException;
/**
* seek to first entry.
@@ -45,14 +46,12 @@ public interface TableIterator<KEY, T> extends Iterator<T>, Closeable {
* @param key - Bytes that represent the key.
* @return VALUE.
*/
- T seek(KEY key) throws IOException;
+ T seek(KEY key) throws RocksDatabaseException, CodecException;
/**
* Remove the actual value of the iterator from the database table on
* which the iterator is working.
- *
- * @throws IOException when there is an error occurred during deletion.
*/
- void removeFromDB() throws IOException;
+ void removeFromDB() throws RocksDatabaseException, CodecException;
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index 325d938a168..173acb883e0 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -23,7 +23,6 @@
import com.google.common.annotations.VisibleForTesting;
import java.io.File;
-import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Iterator;
@@ -75,9 +74,9 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
* @param keyCodec The key codec.
* @param valueCodec The value codec.
* @param cacheType How to cache the entries?
- * @throws IOException
*/
- TypedTable(RDBTable rawTable, Codec<KEY> keyCodec, Codec<VALUE> valueCodec, CacheType cacheType) throws IOException {
+ TypedTable(RDBTable rawTable, Codec<KEY> keyCodec, Codec<VALUE> valueCodec, CacheType cacheType)
+ throws RocksDatabaseException, CodecException {
this.rawTable = Objects.requireNonNull(rawTable, "rawTable==null");
this.keyCodec = Objects.requireNonNull(keyCodec, "keyCodec == null");
this.valueCodec = Objects.requireNonNull(valueCodec, "valueCodec == null");
@@ -111,15 +110,15 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
}
}
- private CodecBuffer encodeKeyCodecBuffer(KEY key) throws IOException {
+ private CodecBuffer encodeKeyCodecBuffer(KEY key) throws CodecException {
return key == null ? null : keyCodec.toDirectCodecBuffer(key);
}
- private byte[] encodeKey(KEY key) throws IOException {
+ private byte[] encodeKey(KEY key) throws CodecException {
return key == null ? null : keyCodec.toPersistedFormat(key);
}
- private byte[] encodeValue(VALUE value) throws IOException {
+ private byte[] encodeValue(VALUE value) throws CodecException {
return value == null ? null : valueCodec.toPersistedFormat(value);
}
@@ -132,7 +131,7 @@ private VALUE decodeValue(byte[] value) throws CodecException {
}
@Override
- public void put(KEY key, VALUE value) throws IOException {
+ public void put(KEY key, VALUE value) throws RocksDatabaseException, CodecException {
if (supportCodecBuffer) {
try (CodecBuffer k = keyCodec.toDirectCodecBuffer(key);
CodecBuffer v = valueCodec.toDirectCodecBuffer(value)) {
@@ -144,8 +143,7 @@ public void put(KEY key, VALUE value) throws IOException {
}
@Override
- public void putWithBatch(BatchOperation batch, KEY key, VALUE value)
- throws IOException {
+ public void putWithBatch(BatchOperation batch, KEY key, VALUE value) throws RocksDatabaseException, CodecException {
if (supportCodecBuffer) {
CodecBuffer keyBuffer = null;
CodecBuffer valueBuffer = null;
@@ -164,12 +162,12 @@ public void putWithBatch(BatchOperation batch, KEY key, VALUE value)
}
@Override
- public boolean isEmpty() throws IOException {
+ public boolean isEmpty() throws RocksDatabaseException {
return rawTable.isEmpty();
}
@Override
- public boolean isExist(KEY key) throws IOException {
+ public boolean isExist(KEY key) throws RocksDatabaseException, CodecException {
CacheResult<VALUE> cacheResult =
cache.lookup(new CacheKey<>(key));
@@ -200,10 +198,9 @@ public boolean isExist(KEY key) throws IOException {
*
* @param key metadata key
* @return the mapped value; or null if the key is not found.
- * @throws IOException when {@link #getFromTable(Object)} throw an exception.
*/
@Override
- public VALUE get(KEY key) throws IOException {
+ public VALUE get(KEY key) throws RocksDatabaseException, CodecException {
// Here the metadata lock will guarantee that cache is not updated for same
// key during get key.
@@ -225,10 +222,9 @@ public VALUE get(KEY key) throws IOException {
*
* @param key metadata key
* @return value in byte array or null if the key is not found.
- * @throws IOException on Failure
*/
@Override
- public VALUE getSkipCache(KEY key) throws IOException {
+ public VALUE getSkipCache(KEY key) throws RocksDatabaseException, CodecException {
return getFromTable(key);
}
@@ -249,10 +245,9 @@ public VALUE getSkipCache(KEY key) throws IOException {
* modifying the same cached object.
* @param key metadata key
* @return VALUE
- * @throws IOException when {@link #getFromTable(Object)} throw an exception.
*/
@Override
- public VALUE getReadCopy(KEY key) throws IOException {
+ public VALUE getReadCopy(KEY key) throws RocksDatabaseException, CodecException {
// Here the metadata lock will guarantee that cache is not updated for same
// key during get key.
@@ -269,7 +264,7 @@ public VALUE getReadCopy(KEY key) throws IOException {
}
@Override
- public VALUE getIfExist(KEY key) throws IOException {
+ public VALUE getIfExist(KEY key) throws RocksDatabaseException, CodecException {
// Here the metadata lock will guarantee that cache is not updated for same
// key during get key.
@@ -295,15 +290,13 @@ public VALUE getIfExist(KEY key) throws IOException {
* partial value may be written.
* @return null if the key is not found;
* otherwise, return the size of the value.
- * @throws IOException in case is an error reading from the db.
*/
- private Integer getFromTable(CodecBuffer key, CodecBuffer outValue)
- throws IOException {
+ private Integer getFromTable(CodecBuffer key, CodecBuffer outValue) throws RocksDatabaseException {
return outValue.putFromSource(
buffer -> rawTable.get(key.asReadOnlyByteBuffer(), buffer));
}
- private VALUE getFromTable(KEY key) throws IOException {
+ private VALUE getFromTable(KEY key) throws RocksDatabaseException, CodecException {
if (supportCodecBuffer) {
return getFromTable(key, this::getFromTable);
} else {
@@ -317,15 +310,14 @@ private VALUE getFromTable(KEY key) throws IOException {
* Similar to {@link #getFromTable(CodecBuffer, CodecBuffer)} except that
* this method use {@link RDBTable#getIfExist(ByteBuffer, ByteBuffer)}.
*/
- private Integer getFromTableIfExist(CodecBuffer key, CodecBuffer outValue)
- throws IOException {
+ private Integer getFromTableIfExist(CodecBuffer key, CodecBuffer outValue) throws RocksDatabaseException {
return outValue.putFromSource(
buffer -> rawTable.getIfExist(key.asReadOnlyByteBuffer(), buffer));
}
private VALUE getFromTable(KEY key,
- CheckedBiFunction<CodecBuffer, CodecBuffer, Integer, IOException> get)
- throws IOException {
+ CheckedBiFunction<CodecBuffer, CodecBuffer, Integer, RocksDatabaseException> get)
+ throws RocksDatabaseException, CodecException {
try (CodecBuffer inKey = keyCodec.toDirectCodecBuffer(key)) {
for (; ;) {
final Integer required;
@@ -362,7 +354,7 @@ private VALUE getFromTable(KEY key,
}
}
- private VALUE getFromTableIfExist(KEY key) throws IOException {
+ private VALUE getFromTableIfExist(KEY key) throws RocksDatabaseException, CodecException {
if (supportCodecBuffer) {
return getFromTable(key, this::getFromTableIfExist);
} else {
@@ -373,7 +365,7 @@ private VALUE getFromTableIfExist(KEY key) throws IOException {
}
@Override
- public void delete(KEY key) throws IOException {
+ public void delete(KEY key) throws RocksDatabaseException, CodecException {
if (keyCodec.supportCodecBuffer()) {
try (CodecBuffer buffer = keyCodec.toDirectCodecBuffer(key)) {
rawTable.delete(buffer.asReadOnlyByteBuffer());
@@ -384,18 +376,18 @@ public void delete(KEY key) throws IOException {
}
@Override
- public void deleteWithBatch(BatchOperation batch, KEY key)
- throws IOException {
+ public void deleteWithBatch(BatchOperation batch, KEY key) throws CodecException {
rawTable.deleteWithBatch(batch, encodeKey(key));
}
@Override
- public void deleteRange(KEY beginKey, KEY endKey) throws IOException {
+ public void deleteRange(KEY beginKey, KEY endKey) throws RocksDatabaseException, CodecException {
rawTable.deleteRange(encodeKey(beginKey), encodeKey(endKey));
}
@Override
- public KeyValueIterator<KEY, VALUE> iterator(KEY prefix, KeyValueIterator.Type type) throws IOException {
+ public KeyValueIterator<KEY, VALUE> iterator(KEY prefix, KeyValueIterator.Type type)
+ throws RocksDatabaseException, CodecException {
if (supportCodecBuffer) {
final CodecBuffer prefixBuffer = encodeKeyCodecBuffer(prefix);
try {
@@ -423,7 +415,7 @@ public String toString() {
}
@Override
- public long getEstimatedKeyCount() throws IOException {
+ public long getEstimatedKeyCount() throws RocksDatabaseException {
if (cache.getCacheType() == CacheType.FULL_CACHE) {
return cache.size();
}
@@ -431,7 +423,7 @@ public long getEstimatedKeyCount() throws IOException {
}
@Override
- public void close() throws Exception {
+ public void close() {
rawTable.close();
}
@@ -462,7 +454,7 @@ public TableCacheMetrics createCacheMetrics() {
public List<KeyValue<KEY, VALUE>> getRangeKVs(
KEY startKey, int count, KEY prefix,
MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException {
+ throws RocksDatabaseException, CodecException {
// A null start key means to start from the beginning of the table.
// Cannot convert a null key to bytes.
@@ -478,7 +470,7 @@ public List<KeyValue<KEY, VALUE>> getRangeKVs(
public List<KeyValue<KEY, VALUE>> getSequentialRangeKVs(
KEY startKey, int count, KEY prefix,
MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException {
+ throws RocksDatabaseException, CodecException {
// A null start key means to start from the beginning of the table.
// Cannot convert a null key to bytes.
@@ -500,19 +492,17 @@ private List<KeyValue<KEY, VALUE>> convert(List<KeyValue<byte[], byte[]>> rangeK
}
@Override
- public void deleteBatchWithPrefix(BatchOperation batch, KEY prefix)
- throws IOException {
+ public void deleteBatchWithPrefix(BatchOperation batch, KEY prefix) throws RocksDatabaseException, CodecException {
rawTable.deleteBatchWithPrefix(batch, encodeKey(prefix));
}
@Override
- public void dumpToFileWithPrefix(File externalFile, KEY prefix)
- throws IOException {
+ public void dumpToFileWithPrefix(File externalFile, KEY prefix) throws RocksDatabaseException, CodecException {
rawTable.dumpToFileWithPrefix(externalFile, encodeKey(prefix));
}
@Override
- public void loadFromFile(File externalFile) throws IOException {
+ public void loadFromFile(File externalFile) throws RocksDatabaseException {
rawTable.loadFromFile(externalFile);
}
@@ -530,7 +520,7 @@ RawIterator<CodecBuffer> newCodecBufferTableIterator(
KeyValueIterator<CodecBuffer, CodecBuffer> i) {
return new RawIterator<CodecBuffer>(i) {
@Override
- AutoCloseSupplier<CodecBuffer> convert(KEY key) throws IOException {
+ AutoCloseSupplier<CodecBuffer> convert(KEY key) throws CodecException {
final CodecBuffer buffer = encodeKeyCodecBuffer(key);
return new AutoCloseSupplier<CodecBuffer>() {
@Override
@@ -568,7 +558,7 @@ public class TypedTableIterator extends RawIterator<byte[]> {
}
@Override
- AutoCloseSupplier<byte[]> convert(KEY key) throws IOException {
+ AutoCloseSupplier<byte[]> convert(KEY key) throws CodecException {
final byte[] keyArray = encodeKey(key);
return () -> keyArray;
}
@@ -594,7 +584,7 @@ abstract class RawIterator<RAW>
}
/** Convert the given key to the {@link RAW} type. */
- abstract AutoCloseSupplier<RAW> convert(KEY key) throws IOException;
+ abstract AutoCloseSupplier<RAW> convert(KEY key) throws CodecException;
/**
* Convert the given {@link Table.KeyValue}
@@ -613,7 +603,7 @@ public void seekToLast() {
}
@Override
- public KeyValue<KEY, VALUE> seek(KEY key) throws IOException {
+ public KeyValue<KEY, VALUE> seek(KEY key) throws RocksDatabaseException, CodecException {
try (AutoCloseSupplier<RAW> rawKey = convert(key)) {
final KeyValue<RAW, RAW> result = rawIterator.seek(rawKey.get());
return result == null ? null : convert(result);
@@ -621,7 +611,7 @@ public KeyValue<KEY, VALUE> seek(KEY key) throws IOException {
}
@Override
- public void close() throws IOException {
+ public void close() throws RocksDatabaseException {
rawIterator.close();
}
@@ -634,13 +624,13 @@ public boolean hasNext() {
public KeyValue<KEY, VALUE> next() {
try {
return convert(rawIterator.next());
- } catch (IOException e) {
+ } catch (CodecException e) {
throw new IllegalStateException("Failed next()", e);
}
}
@Override
- public void removeFromDB() throws IOException {
+ public void removeFromDB() throws RocksDatabaseException, CodecException {
rawIterator.removeFromDB();
}
}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/MapBackedTableIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/MapBackedTableIterator.java
index 7dc22b6d80f..5af0e671d51 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/MapBackedTableIterator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/MapBackedTableIterator.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.utils;
-import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;
@@ -51,7 +50,7 @@ public void seekToLast() {
}
@Override
- public Table.KeyValue<String, V> seek(String s) throws IOException {
+ public Table.KeyValue<String, V> seek(String s) {
this.itr = this.values.entrySet().stream()
.filter(e -> prefix == null || e.getKey().startsWith(prefix))
.filter(e -> e.getKey().compareTo(s) >= 0)
@@ -61,12 +60,12 @@ public Table.KeyValue<String, V> seek(String s) throws IOException {
}
@Override
- public void removeFromDB() throws IOException {
+ public void removeFromDB() {
}
@Override
- public void close() throws IOException {
+ public void close() {
}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java
index 6caf7336f4c..0b5f325b83b 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdds.utils.db;
import java.io.File;
-import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@@ -106,8 +105,7 @@ public long getEstimatedKeyCount() {
@Override
public List<KeyValue<KEY, VALUE>> getRangeKVs(KEY startKey, int count, KEY prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
- throws IOException, IllegalArgumentException {
+ MetadataKeyFilters.MetadataKeyFilter... filters) {
throw new UnsupportedOperationException();
}
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index e5da6bd5792..80a48482773 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -117,6 +117,10 @@
<groupId>org.apache.ozone</groupId>
<artifactId>hdds-interface-server</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.ozone</groupId>
+ <artifactId>hdds-managed-rocksdb</artifactId>
+ </dependency>
<dependency>
<groupId>org.apache.ozone</groupId>
<artifactId>hdds-server-framework</artifactId>
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
index f198a425e7e..b824c932d27 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
@@ -33,6 +33,8 @@
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
+import org.apache.hadoop.hdds.utils.db.CodecException;
+import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.TypedTable;
@@ -117,7 +119,7 @@ public TypedTable.KeyValue<Long, DeletedBlocksTransaction> next() {
}
@Override
- public void close() throws IOException {
+ public void close() throws RocksDatabaseException {
iter.close();
}
@@ -133,8 +135,8 @@ public void seekToLast() {
}
@Override
- public TypedTable.KeyValue<Long, DeletedBlocksTransaction> seek(
- Long key) throws IOException {
+ public TypedTable.KeyValue<Long, DeletedBlocksTransaction> seek(Long key)
+ throws RocksDatabaseException, CodecException {
iter.seek(key);
findNext();
return nextTx;
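
End-to-end, iteration follows the same pattern: iterator() and
TableIterator.close() now declare RocksDatabaseException (plus CodecException
where decoding is involved), so loops need no generic IOException handling. A
hypothetical sketch (illustrative names, not taken from this patch; assumes
KeyValueIterator is the nested iterator type of Table, as the diff suggests):

    import org.apache.hadoop.hdds.utils.db.CodecException;
    import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
    import org.apache.hadoop.hdds.utils.db.Table;

    final class IterationSketch {
      static <K, V> long countEntries(Table<K, V> table)
          throws RocksDatabaseException, CodecException {
        long n = 0;
        // Both iterator() and close() declare the narrowed exceptions.
        try (Table.KeyValueIterator<K, V> it = table.iterator()) {
          while (it.hasNext()) {
            it.next();
            n++;
          }
        }
        return n;
      }
    }
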
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]