This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 11ada9f9b6f HDDS-13318. Simplify the getRangeKVs methods in Table (#8683)
11ada9f9b6f is described below
commit 11ada9f9b6feef79833fba942b4a6028903a1632
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Tue Jun 24 12:22:53 2025 -0700
HDDS-13318. Simplify the getRangeKVs methods in Table (#8683)
---
.../container/keyvalue/KeyValueContainerData.java | 4 +-
.../keyvalue/helpers/KeyValueContainerUtil.java | 8 +--
.../container/keyvalue/impl/BlockManagerImpl.java | 6 +-
.../statemachine/background/BlockDeletingTask.java | 6 +-
.../container/metadata/AbstractDatanodeStore.java | 13 ++---
.../metadata/DatanodeStoreSchemaThreeImpl.java | 13 ++---
.../ozone/container/metadata/DatanodeTable.java | 17 ++----
.../metadata/SchemaOneDeletedBlocksTable.java | 30 ++--------
.../keyvalue/TestKeyValueBlockIterator.java | 8 +--
.../hadoop/hdds/utils/MetadataKeyFilters.java | 36 ++++--------
.../org/apache/hadoop/hdds/utils/db/RDBTable.java | 65 ++++++----------------
.../org/apache/hadoop/hdds/utils/db/Table.java | 49 ++++++----------
.../apache/hadoop/hdds/utils/db/TypedTable.java | 31 ++---------
.../hadoop/hdds/utils/db/InMemoryTestTable.java | 12 +---
.../hadoop/hdds/utils/db/TestRDBTableStore.java | 6 +-
.../apache/hadoop/ozone/debug/om/PrefixParser.java | 22 +++-----
16 files changed, 99 insertions(+), 227 deletions(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 13603640b4e..022764c227d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -430,11 +430,11 @@ public String getDeletingBlockKeyPrefix() {
public KeyPrefixFilter getUnprefixedKeyFilter() {
String schemaPrefix = containerPrefix();
- return new KeyPrefixFilter().addFilter(schemaPrefix + "#", true);
+ return KeyPrefixFilter.newFilter(schemaPrefix + "#", true);
}
public KeyPrefixFilter getDeletingBlockKeyFilter() {
- return new KeyPrefixFilter().addFilter(getDeletingBlockKeyPrefix());
+ return KeyPrefixFilter.newFilter(getDeletingBlockKeyPrefix());
}
/**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 884f16f77a8..885003ac1b8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -330,10 +330,10 @@ private static void populateContainerMetadata(
LOG.warn("Missing pendingDeleteBlockCount from {}: recalculate them from
block table", metadataTable.getName());
MetadataKeyFilters.KeyPrefixFilter filter =
kvContainerData.getDeletingBlockKeyFilter();
- blockPendingDeletion = store.getBlockDataTable()
- .getSequentialRangeKVs(kvContainerData.startKeyEmpty(),
- Integer.MAX_VALUE, kvContainerData.containerPrefix(),
- filter).size();
+ blockPendingDeletion = store.getBlockDataTable().getRangeKVs(
+ kvContainerData.startKeyEmpty(), Integer.MAX_VALUE, kvContainerData.containerPrefix(), filter, true)
+ // TODO: add a count() method to avoid creating a list
+ .size();
}
// Set delete transaction id.
Long delTxnId =
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 0c89ce91ac0..a8fe877e9ea 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -423,10 +423,8 @@ public List<BlockData> listBlock(Container container, long startLocalID, int
result = new ArrayList<>();
String startKey = (startLocalID == -1) ? cData.startKeyEmpty()
: cData.getBlockKey(startLocalID);
- List<Table.KeyValue<String, BlockData>> range =
- db.getStore().getBlockDataTable()
- .getSequentialRangeKVs(startKey, count,
- cData.containerPrefix(), cData.getUnprefixedKeyFilter());
+ final List<Table.KeyValue<String, BlockData>> range = db.getStore().getBlockDataTable().getRangeKVs(
+ startKey, count, cData.containerPrefix(), cData.getUnprefixedKeyFilter(), true);
for (Table.KeyValue<String, BlockData> entry : range) {
result.add(db.getStore().getCompleteBlockData(entry.getValue(),
null, entry.getKey()));
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
index c2776b30335..ed5138b9b3c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
@@ -184,10 +184,8 @@ public ContainerBackgroundTaskResult deleteViaSchema1(
// # of blocks to delete is throttled
KeyPrefixFilter filter = containerData.getDeletingBlockKeyFilter();
- List<Table.KeyValue<String, BlockData>> toDeleteBlocks = blockDataTable
- .getSequentialRangeKVs(containerData.startKeyEmpty(),
- (int) blocksToDelete, containerData.containerPrefix(),
- filter);
+ final List<Table.KeyValue<String, BlockData>> toDeleteBlocks = blockDataTable.getRangeKVs(
+ containerData.startKeyEmpty(), (int) blocksToDelete, containerData.containerPrefix(), filter, true);
if (toDeleteBlocks.isEmpty()) {
LOG.debug("No under deletion block found in container : {}",
containerData.getContainerID());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 7e55a019949..34af94084f0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -196,9 +196,8 @@ protected static void checkTableStatus(Table<?, ?> table, String name)
/**
* Block Iterator for KeyValue Container. This block iterator returns blocks
- * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. If no
- * filter is specified, then default filter used is
- * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()}
+ * which match with the {@link KeyPrefixFilter}.
+ * The default filter is {@link #DEFAULT_BLOCK_FILTER}.
*/
@InterfaceAudience.Public
public static class KeyValueBlockIterator implements
@@ -263,7 +262,7 @@ public boolean hasNext() throws IOException {
while (blockIterator.hasNext()) {
Table.KeyValue<String, BlockData> keyValue = blockIterator.next();
byte[] keyBytes = StringUtils.string2Bytes(keyValue.getKey());
- if (blockFilter.filterKey(null, keyBytes, null)) {
+ if (blockFilter.filterKey(keyBytes)) {
nextBlock = keyValue.getValue();
if (LOG.isTraceEnabled()) {
LOG.trace("Block matching with filter found: blockID is : {} for "
+
@@ -296,9 +295,7 @@ public void close() throws IOException {
/**
* Block localId Iterator for KeyValue Container.
* This Block localId iterator returns localIds
- * which match with the {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter}. If no
- * filter is specified, then default filter used is
- * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters#getUnprefixedKeyFilter()}
+ * which match with the {@link KeyPrefixFilter}.
*/
@InterfaceAudience.Public
public static class KeyValueBlockLocalIdIterator implements
@@ -351,7 +348,7 @@ public boolean hasNext() throws IOException {
while (blockLocalIdIterator.hasNext()) {
Table.KeyValue<String, Long> keyValue = blockLocalIdIterator.next();
byte[] keyBytes = StringUtils.string2Bytes(keyValue.getKey());
- if (localIdFilter.filterKey(null, keyBytes, null)) {
+ if (localIdFilter.filterKey(keyBytes)) {
nextLocalId = keyValue.getValue();
if (LOG.isTraceEnabled()) {
LOG.trace("Block matching with filter found: LocalID is : " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java
index 98adc30abd8..cd246f00ddd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java
@@ -29,7 +29,7 @@
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Codec;
import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec;
@@ -89,21 +89,20 @@ public BlockIterator<BlockData> getBlockIterator(long containerID)
return new KeyValueBlockIterator(containerID,
getBlockDataTableWithIterator()
.iterator(getContainerKeyPrefix(containerID)),
- new MetadataKeyFilters.KeyPrefixFilter().addFilter(
- getContainerKeyPrefix(containerID) + "#", true));
+ KeyPrefixFilter.newFilter(getContainerKeyPrefix(containerID) + "#", true));
}
@Override
- public BlockIterator<BlockData> getBlockIterator(long containerID,
- MetadataKeyFilters.KeyPrefixFilter filter) throws IOException {
+ public BlockIterator<BlockData> getBlockIterator(long containerID, KeyPrefixFilter filter)
+ throws IOException {
return new KeyValueBlockIterator(containerID,
getBlockDataTableWithIterator()
.iterator(getContainerKeyPrefix(containerID)), filter);
}
@Override
- public BlockIterator<Long> getFinalizeBlockIterator(long containerID,
- MetadataKeyFilters.KeyPrefixFilter filter) throws IOException {
+ public BlockIterator<Long> getFinalizeBlockIterator(long containerID, KeyPrefixFilter filter)
+ throws IOException {
return new KeyValueBlockLocalIdIterator(containerID,
getFinalizeBlocksTableWithIterator().iterator(getContainerKeyPrefix(containerID)),
filter);
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java
index 9c40ce5aac6..2621b1f7d85 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java
@@ -19,7 +19,7 @@
import java.io.File;
import java.util.List;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.CodecException;
import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
@@ -111,18 +111,9 @@ public VALUE getReadCopy(KEY key) throws RocksDatabaseException, CodecException
@Override
public List<KeyValue<KEY, VALUE>> getRangeKVs(
- KEY startKey, int count, KEY prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
- throws RocksDatabaseException, CodecException {
- return table.getRangeKVs(startKey, count, prefix, filters);
- }
-
- @Override
- public List<KeyValue<KEY, VALUE>> getSequentialRangeKVs(
- KEY startKey, int count, KEY prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
- throws RocksDatabaseException, CodecException {
- return table.getSequentialRangeKVs(startKey, count, prefix, filters);
+ KEY startKey, int count, KEY prefix, KeyPrefixFilter filter, boolean isSequential)
+ throws RocksDatabaseException, CodecException {
+ return table.getRangeKVs(startKey, count, prefix, filter, isSequential);
}
@Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java
index 1e55e302fb1..913d6e30d1a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java
@@ -19,7 +19,7 @@
import java.util.List;
import java.util.stream.Collectors;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.CodecException;
import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
@@ -46,6 +46,7 @@
public class SchemaOneDeletedBlocksTable extends DatanodeTable<String,
ChunkInfoList> {
public static final String DELETED_KEY_PREFIX = "#deleted#";
+ private static final KeyPrefixFilter DELETED_FILTER = KeyPrefixFilter.newFilter(DELETED_KEY_PREFIX);
public SchemaOneDeletedBlocksTable(Table<String, ChunkInfoList> table) {
super(table);
@@ -99,28 +100,12 @@ public ChunkInfoList getReadCopy(String key) throws RocksDatabaseException, Code
@Override
public List<KeyValue<String, ChunkInfoList>> getRangeKVs(
- String startKey, int count, String prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
- throws RocksDatabaseException, CodecException {
-
- // Deleted blocks will always have the #deleted# key prefix and nothing
- // else in this schema version. Ignore any user passed prefixes that could
- // collide with this and return results that are not deleted blocks.
- return unprefix(super.getRangeKVs(prefix(startKey), count,
- prefix, getDeletedFilter()));
- }
-
- @Override
- public List<KeyValue<String, ChunkInfoList>> getSequentialRangeKVs(
- String startKey, int count, String prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
- throws RocksDatabaseException, CodecException {
-
+ String startKey, int count, String prefix, KeyPrefixFilter filter, boolean isSequential)
+ throws RocksDatabaseException, CodecException {
// Deleted blocks will always have the #deleted# key prefix and nothing
// else in this schema version. Ignore any user passed prefixes that could
// collide with this and return results that are not deleted blocks.
- return unprefix(super.getSequentialRangeKVs(prefix(startKey), count,
- prefix, getDeletedFilter()));
+ return unprefix(super.getRangeKVs(prefix(startKey), count, prefix, DELETED_FILTER, isSequential));
}
private static String prefix(String key) {
@@ -148,9 +133,4 @@ private static List<KeyValue<String, ChunkInfoList>> unprefix(
.map(kv -> Table.newKeyValue(unprefix(kv.getKey()), kv.getValue()))
.collect(Collectors.toList());
}
-
- private static MetadataKeyFilters.KeyPrefixFilter getDeletedFilter() {
- return (new MetadataKeyFilters.KeyPrefixFilter())
- .addFilter(DELETED_KEY_PREFIX);
- }
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index 034bc964065..aa0e668c82a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -38,7 +38,7 @@
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -305,16 +305,14 @@ public void testKeyValueBlockIteratorWithAdvancedFilter(
// Test arbitrary filter.
String schemaPrefix = containerData.containerPrefix();
- MetadataKeyFilters.KeyPrefixFilter secondFilter =
- new MetadataKeyFilters.KeyPrefixFilter()
- .addFilter(schemaPrefix + secondPrefix);
+ final KeyPrefixFilter secondFilter = KeyPrefixFilter.newFilter(schemaPrefix + secondPrefix);
testWithFilter(secondFilter, blockIDs.get(secondPrefix));
}
/**
* Helper method to run some iterator tests with a provided filter.
*/
- private void testWithFilter(MetadataKeyFilters.KeyPrefixFilter filter,
+ private void testWithFilter(KeyPrefixFilter filter,
List<Long> expectedIDs) throws Exception {
try (BlockIterator<BlockData> iterator =
db.getStore().getBlockIterator(CONTAINER_ID, filter)) {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
index 551560a31f6..a36b38989fd 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
@@ -54,12 +54,10 @@ public interface MetadataKeyFilter {
/**
* Filter levelDB key with a certain condition.
*
- * @param preKey previous key.
* @param currentKey current key.
- * @param nextKey next key.
* @return true if a certain condition satisfied, return false otherwise.
*/
- boolean filterKey(byte[] preKey, byte[] currentKey, byte[] nextKey);
+ boolean filterKey(byte[] currentKey);
default int getKeysScannedNum() {
return 0;
@@ -74,28 +72,13 @@ default int getKeysHintedNum() {
* Utility class to filter key by a string prefix. This filter
* assumes keys can be parsed to a string.
*/
- public static class KeyPrefixFilter implements MetadataKeyFilter {
-
+ public static final class KeyPrefixFilter implements MetadataKeyFilter {
private List<String> positivePrefixList = new ArrayList<>();
private List<String> negativePrefixList = new ArrayList<>();
- private boolean atleastOnePositiveMatch;
private int keysScanned = 0;
private int keysHinted = 0;
- public KeyPrefixFilter() { }
-
- /**
- * KeyPrefixFilter constructor. It is made of positive and negative prefix
- * list. PositivePrefixList is the list of prefixes which are accepted
- * whereas negativePrefixList contains the list of prefixes which are
- * rejected.
- *
- * @param atleastOnePositiveMatch if positive it requires key to be accepted
- * by atleast one positive filter.
- */
- public KeyPrefixFilter(boolean atleastOnePositiveMatch) {
- this.atleastOnePositiveMatch = atleastOnePositiveMatch;
- }
+ private KeyPrefixFilter() { }
public KeyPrefixFilter addFilter(String keyPrefix) {
addFilter(keyPrefix, false);
@@ -129,8 +112,7 @@ public KeyPrefixFilter addFilter(String keyPrefix, boolean negative) {
}
@Override
- public boolean filterKey(byte[] preKey, byte[] currentKey,
- byte[] nextKey) {
+ public boolean filterKey(byte[] currentKey) {
keysScanned++;
if (currentKey == null) {
return false;
@@ -150,8 +132,6 @@ public boolean filterKey(byte[] preKey, byte[] currentKey,
if (accept) {
keysHinted++;
return true;
- } else if (atleastOnePositiveMatch) {
- return false;
}
accept = !negativePrefixList.isEmpty() && negativePrefixList.stream()
@@ -190,5 +170,13 @@ private static boolean prefixMatch(byte[] prefix, byte[] key) {
}
return true;
}
+
+ public static KeyPrefixFilter newFilter(String prefix) {
+ return newFilter(prefix, false);
+ }
+
+ public static KeyPrefixFilter newFilter(String prefix, boolean negative) {
+ return new KeyPrefixFilter().addFilter(prefix, negative);
+ }
}
}
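
The MetadataKeyFilters hunk above replaces the no-arg constructor plus addFilter chain with static newFilter factories and trims filterKey to a single argument. Below is a minimal caller-side sketch of the new shape; the class name and the prefix strings are illustrative, only the KeyPrefixFilter API itself comes from this patch.

    import org.apache.hadoop.hdds.StringUtils;
    import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;

    class KeyPrefixFilterSketch {
      static boolean demo() {
        // Positive match: accept only keys starting with this prefix (value is illustrative).
        KeyPrefixFilter deleting = KeyPrefixFilter.newFilter("#deleting#");
        // Negative match: reject keys starting with this prefix (negative = true).
        KeyPrefixFilter notDeleted = KeyPrefixFilter.newFilter("#deleted#", true);

        byte[] key = StringUtils.string2Bytes("#deleting#42");
        // filterKey now takes only the current key; the unused preKey/nextKey
        // parameters were removed in this commit.
        return deleting.filterKey(key) && notDeleted.filterKey(key);
      }
    }
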
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
index c46ffb7023e..4ad625ed511 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
@@ -20,11 +20,10 @@
import java.io.File;
import java.nio.ByteBuffer;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import java.util.function.Supplier;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.RocksDatabase.ColumnFamily;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
@@ -227,18 +226,6 @@ public long getEstimatedKeyCount() throws RocksDatabaseException {
return db.estimateNumKeys(family);
}
- @Override
- public List<KeyValue<byte[], byte[]>> getRangeKVs(byte[] startKey, int count, byte[] prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters) throws RocksDatabaseException, CodecException {
- return getRangeKVs(startKey, count, false, prefix, filters);
- }
-
- @Override
- public List<KeyValue<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey, int count, byte[] prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters) throws RocksDatabaseException, CodecException {
- return getRangeKVs(startKey, count, true, prefix, filters);
- }
-
@Override
public void deleteBatchWithPrefix(BatchOperation batch, byte[] prefix)
throws RocksDatabaseException, CodecException {
@@ -266,8 +253,10 @@ public void loadFromFile(File externalFile) throws RocksDatabaseException {
RDBSstFileLoader.load(db, family, externalFile);
}
- private List<KeyValue<byte[], byte[]>> getRangeKVs(byte[] startKey, int count, boolean sequential, byte[] prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters) throws RocksDatabaseException, CodecException {
+ @Override
+ public List<KeyValue<byte[], byte[]>> getRangeKVs(
+ byte[] startKey, int count, byte[] prefix, KeyPrefixFilter filter, boolean sequential)
+ throws RocksDatabaseException, CodecException {
long start = Time.monotonicNow();
if (count < 0) {
@@ -289,46 +278,28 @@ && get(startKey) == null) {
while (it.hasNext() && result.size() < count) {
final KeyValue<byte[], byte[]> currentEntry = it.next();
- byte[] currentKey = currentEntry.getKey();
-
- if (filters == null) {
+ if (filter == null || filter.filterKey(currentEntry.getKey())) {
result.add(currentEntry);
- } else {
- // NOTE: the preKey and nextKey are never checked
- // in all existing underlying filters, so they could
- // be safely as null here.
- if (Arrays.stream(filters)
- .allMatch(entry -> entry.filterKey(null,
- currentKey, null))) {
- result.add(currentEntry);
- } else {
- if (!result.isEmpty() && sequential) {
- // if the caller asks for a sequential range of results,
- // and we met a dis-match, abort iteration from here.
- // if result is empty, we continue to look for the first match.
- break;
- }
- }
+ } else if (!result.isEmpty() && sequential) {
+ // if the caller asks for a sequential range of results,
+ // and we met a dis-match, abort iteration from here.
+ // if result is empty, we continue to look for the first match.
+ break;
}
}
} finally {
long end = Time.monotonicNow();
long timeConsumed = end - start;
if (LOG.isDebugEnabled()) {
- if (filters != null) {
- for (MetadataKeyFilters.MetadataKeyFilter filter : filters) {
- int scanned = filter.getKeysScannedNum();
- int hinted = filter.getKeysHintedNum();
- if (scanned > 0 || hinted > 0) {
- LOG.debug(
- "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}",
- filter.getClass().getSimpleName(), filter.getKeysScannedNum(),
- filter.getKeysHintedNum());
- }
+ if (filter != null) {
+ final int scanned = filter.getKeysScannedNum();
+ final int hinted = filter.getKeysHintedNum();
+ if (scanned > 0 || hinted > 0) {
+ LOG.debug("getRangeKVs ({}) numOfKeysScanned={},
numOfKeysHinted={}",
+ filter.getClass().getSimpleName(), scanned, hinted);
}
}
- LOG.debug("Time consumed for getRangeKVs() is {}ms,"
- + " result length is {}.", timeConsumed, result.size());
+ LOG.debug("Time consumed for getRangeKVs() is {}ms, result length is
{}.", timeConsumed, result.size());
}
}
return result;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index 84a657310c2..0393c8c5147 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -24,7 +24,7 @@
import java.util.Objects;
import org.apache.commons.lang3.NotImplementedException;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.TableCacheMetrics;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -239,9 +239,7 @@ default TableCacheMetrics createCacheMetrics() throws RocksDatabaseException {
}
/**
- * Returns a certain range of key value pairs as a list based on a
- * startKey or count. Further a {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}
- * can be added to * filter keys if necessary.
+ * Returns a certain range of key value pairs as a list based on a startKey or count.
* To prevent race conditions while listing
* entries, this implementation takes a snapshot and lists the entries from
* the snapshot. This may, on the other hand, cause the range result slight
@@ -255,44 +253,33 @@ default TableCacheMetrics createCacheMetrics() throws RocksDatabaseException {
* The count argument is to limit number of total entries to return,
* the value for count must be an integer greater than 0.
* <p>
- * This method allows to specify one or more
- * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}
- * to filter keys by certain condition. Once given, only the entries
- * whose key passes all the filters will be included in the result.
+ * This method allows to specify a {@link KeyPrefixFilter} to filter keys.
+ * Once given, only the entries whose key passes all the filters will be included in the result.
*
* @param startKey a start key.
* @param count max number of entries to return.
* @param prefix fixed key schema specific prefix
- * @param filters customized one or more
- * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}.
+ * @param filter for filtering keys
+ * @param isSequential does it require sequential keys?
* @return a list of entries found in the database or an empty list if the
* startKey is invalid.
* @throws IllegalArgumentException if count is less than 0.
*/
List<KeyValue<KEY, VALUE>> getRangeKVs(KEY startKey,
- int count, KEY prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
+ int count, KEY prefix, KeyPrefixFilter filter, boolean isSequential)
throws RocksDatabaseException, CodecException;
- /**
- * This method is very similar to {@link #getRangeKVs}, the only
- * different is this method is supposed to return a sequential range
- * of elements based on the filters. While iterating the elements,
- * if it met any entry that cannot pass the filter, the iterator will stop
- * from this point without looking for next match. If no filter is given,
- * this method behaves just like {@link #getRangeKVs}.
- *
- * @param startKey a start key.
- * @param count max number of entries to return.
- * @param prefix fixed key schema specific prefix
- * @param filters customized one or more
- * {@link org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter}.
- * @return a list of entries found in the database.
- */
- List<KeyValue<KEY, VALUE>> getSequentialRangeKVs(KEY startKey,
- int count, KEY prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
- throws RocksDatabaseException, CodecException;
+ /** The same as getRangeKVs(startKey, count, prefix, filter, false). */
+ default List<KeyValue<KEY, VALUE>> getRangeKVs(KEY startKey, int count, KEY prefix, KeyPrefixFilter filter)
+ throws RocksDatabaseException, CodecException {
+ return getRangeKVs(startKey, count, prefix, filter, false);
+ }
+
+ /** The same as getRangeKVs(startKey, count, prefix, null). */
+ default List<KeyValue<KEY, VALUE>> getRangeKVs(KEY startKey, int count, KEY prefix)
+ throws RocksDatabaseException, CodecException {
+ return getRangeKVs(startKey, count, prefix, null);
+ }
/**
* Deletes all keys with the specified prefix from the metadata store
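
The Table.java hunk above folds getSequentialRangeKVs into a boolean isSequential flag on getRangeKVs and adds two default overloads. A minimal caller-side sketch follows, assuming an illustrative Table<String, String> named table plus made-up prefix/count values; only the method shapes come from the patch.

    import java.util.List;
    import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
    import org.apache.hadoop.hdds.utils.db.CodecException;
    import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
    import org.apache.hadoop.hdds.utils.db.Table;

    class GetRangeKVsSketch {
      static List<Table.KeyValue<String, String>> list(
          Table<String, String> table, String prefix, KeyPrefixFilter filter)
          throws RocksDatabaseException, CodecException {
        // isSequential = true: stop at the first non-matching key once matches
        // have started (the old getSequentialRangeKVs behaviour).
        List<Table.KeyValue<String, String>> sequential =
            table.getRangeKVs(null, 100, prefix, filter, true);
        // Default overload: same scan, but keep looking past non-matching keys.
        List<Table.KeyValue<String, String>> filtered =
            table.getRangeKVs(null, 100, prefix, filter);
        // Second default overload: no filter at all; a null startKey means
        // "start from the beginning of the table" per the javadoc above.
        return table.getRangeKVs(null, 100, prefix);
      }
    }
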
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index 005822c186c..978e7168c20 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -30,7 +30,7 @@
import java.util.Map;
import java.util.Objects;
import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.TableCacheMetrics;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheResult;
@@ -438,25 +438,9 @@ public TableCacheMetrics createCacheMetrics() {
@Override
public List<KeyValue<KEY, VALUE>> getRangeKVs(
- KEY startKey, int count, KEY prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
- throws RocksDatabaseException, CodecException {
-
- // A null start key means to start from the beginning of the table.
- // Cannot convert a null key to bytes.
- final byte[] startKeyBytes = encodeKey(startKey);
- final byte[] prefixBytes = encodeKey(prefix);
-
- List<KeyValue<byte[], byte[]>> rangeKVBytes =
- rawTable.getRangeKVs(startKeyBytes, count, prefixBytes, filters);
- return convert(rangeKVBytes);
- }
-
- @Override
- public List<KeyValue<KEY, VALUE>> getSequentialRangeKVs(
- KEY startKey, int count, KEY prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters)
- throws RocksDatabaseException, CodecException {
+ KEY startKey, int count, KEY prefix, KeyPrefixFilter filter, boolean isSequential)
+ throws RocksDatabaseException, CodecException {
+ // TODO use CodecBuffer if the key codec supports
// A null start key means to start from the beginning of the table.
// Cannot convert a null key to bytes.
@@ -464,12 +448,7 @@ public List<KeyValue<KEY, VALUE>> getSequentialRangeKVs(
final byte[] prefixBytes = encodeKey(prefix);
List<KeyValue<byte[], byte[]>> rangeKVBytes =
- rawTable.getSequentialRangeKVs(startKeyBytes, count,
- prefixBytes, filters);
- return convert(rangeKVBytes);
- }
-
- private List<KeyValue<KEY, VALUE>> convert(List<KeyValue<byte[], byte[]>> rangeKVBytes) throws CodecException {
+ rawTable.getRangeKVs(startKeyBytes, count, prefixBytes, filter, isSequential);
final List<KeyValue<KEY, VALUE>> rangeKVs = new ArrayList<>();
for (KeyValue<byte[], byte[]> kv : rangeKVBytes) {
rangeKVs.add(Table.newKeyValue(decodeKey(kv.getKey()),
decodeValue(kv.getValue())));
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java
index dfc67acaf86..f234364ade4 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java
@@ -22,7 +22,7 @@
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
/**
* InMemory Table implementation for tests.
@@ -100,14 +100,8 @@ public long getEstimatedKeyCount() {
}
@Override
- public List<KeyValue<KEY, VALUE>> getRangeKVs(KEY startKey, int count, KEY prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public List<KeyValue<KEY, VALUE>> getSequentialRangeKVs(KEY startKey, int count, KEY prefix,
- MetadataKeyFilters.MetadataKeyFilter... filters) {
+ public List<KeyValue<KEY, VALUE>> getRangeKVs(
+ KEY startKey, int count, KEY prefix, KeyPrefixFilter filter, boolean isSequential) {
throw new UnsupportedOperationException();
}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index 9fbe3b19877..008878de1d3 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -41,7 +41,7 @@
import java.util.Set;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.StringUtils;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.cache.TableCache.CacheType;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
@@ -640,9 +640,7 @@ public void testPrefixedRangeKVs() throws Exception {
assertEquals(2, rangeKVs.size());
// test with a filter
- MetadataKeyFilters.KeyPrefixFilter filter1 = new MetadataKeyFilters
- .KeyPrefixFilter()
- .addFilter(StringUtils.bytes2String(samplePrefix) + "1");
+ final KeyPrefixFilter filter1 = KeyPrefixFilter.newFilter(StringUtils.bytes2String(samplePrefix) + "1");
startKey = StringUtils.string2Bytes(
StringUtils.bytes2String(samplePrefix));
rangeKVs = testTable.getRangeKVs(startKey, blockCount,
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java
index 35e21f5811f..6bc4e35ae5f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java
@@ -27,7 +27,7 @@
import java.util.List;
import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -182,19 +182,15 @@ public BucketLayout getBucketLayout() {
return BucketLayout.FILE_SYSTEM_OPTIMIZED;
}
- private void dumpTableInfo(Types type,
+ private <T extends WithParentObjectId> void dumpTableInfo(Types type,
org.apache.hadoop.fs.Path effectivePath,
- Table<String, ? extends WithParentObjectId> table,
+ Table<String, T> table,
long volumeId, long bucketId, long lastObjectId)
throws IOException {
- MetadataKeyFilters.KeyPrefixFilter filter = getPrefixFilter(
- volumeId, bucketId, lastObjectId);
+ final KeyPrefixFilter filter = getPrefixFilter(volumeId, bucketId, lastObjectId);
+ final List<KeyValue<String, T>> infoList = table.getRangeKVs(null, 1000, null, filter, false);
- List<? extends KeyValue
- <String, ? extends WithParentObjectId>> infoList =
- table.getRangeKVs(null, 1000, null, filter);
-
- for (KeyValue<String, ? extends WithParentObjectId> info :infoList) {
+ for (KeyValue<String, T> info : infoList) {
Path key = Paths.get(info.getKey());
dumpInfo(type, getEffectivePath(effectivePath,
key.getName(1).toString()), info.getValue(), info.getKey());
@@ -218,13 +214,11 @@ private void dumpInfo(Types level, org.apache.hadoop.fs.Path effectivePath,
}
- private static MetadataKeyFilters.KeyPrefixFilter getPrefixFilter(
- long volumeId, long bucketId, long parentId) {
+ private static KeyPrefixFilter getPrefixFilter(long volumeId, long bucketId, long parentId) {
String key = OM_KEY_PREFIX + volumeId +
OM_KEY_PREFIX + bucketId +
OM_KEY_PREFIX + parentId;
- return (new MetadataKeyFilters.KeyPrefixFilter())
- .addFilter(key);
+ return KeyPrefixFilter.newFilter(key);
}
public int getParserStats(Types type) {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]