This is an automated email from the ASF dual-hosted git repository.
siyao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 09e89fd67e HDDS-6312. Use KeyPrefixContainer table to accelerate the
process of DELETE/UPDATE events (#3082)
09e89fd67e is described below
commit 09e89fd67e15fcac8a5d689b040d01992fcf4937
Author: Symious <[email protected]>
AuthorDate: Tue Aug 30 15:31:01 2022 +0800
HDDS-6312. Use KeyPrefixContainer table to accelerate the process of
DELETE/UPDATE events (#3082)
---
.../ozone/recon/api/types/ContainerKeyPrefix.java | 9 ++
...ainerKeyPrefix.java => KeyPrefixContainer.java} | 50 +++++----
.../recon/spi/ReconContainerMetadataManager.java | 17 +++
.../recon/spi/impl/KeyPrefixContainerCodec.java | 92 +++++++++++++++++
.../impl/ReconContainerMetadataManagerImpl.java | 115 +++++++++++++++++++++
.../ozone/recon/spi/impl/ReconDBDefinition.java | 12 ++-
.../ozone/recon/tasks/ContainerKeyMapperTask.java | 21 ++--
.../TestReconContainerMetadataManagerImpl.java | 25 +++++
8 files changed, 314 insertions(+), 27 deletions(-)
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java
index 47519d3b20..9dd9ad5f98 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.ozone.recon.api.types;
+import org.apache.commons.lang3.StringUtils;
+
/**
* Class to encapsulate the Key information needed for the Recon container DB.
* Currently, it is the containerId and the whole key + key version.
@@ -68,6 +70,13 @@ public class ContainerKeyPrefix {
this.keyVersion = keyVersion;
}
+ public KeyPrefixContainer toKeyPrefixContainer() {
+ if (StringUtils.isNotEmpty(keyPrefix)) {
+ return new KeyPrefixContainer(keyPrefix, keyVersion, containerId);
+ }
+ return null;
+ }
+
@Override
public boolean equals(Object o) {
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyPrefixContainer.java
similarity index 65%
copy from
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java
copy to
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyPrefixContainer.java
index 47519d3b20..c77d3a835a 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyPrefixContainer.java
@@ -18,38 +18,38 @@
package org.apache.hadoop.ozone.recon.api.types;
+import java.util.Objects;
+
/**
* Class to encapsulate the Key information needed for the Recon container DB.
- * Currently, it is the containerId and the whole key + key version.
+ * Currently, it is the whole key + key version and the containerId.
*/
-public class ContainerKeyPrefix {
+public class KeyPrefixContainer {
- private long containerId;
private String keyPrefix;
private long keyVersion = -1;
+ private long containerId = -1;
- public ContainerKeyPrefix(long containerId, String keyPrefix) {
- this.containerId = containerId;
+ public KeyPrefixContainer(String keyPrefix, long keyVersion) {
this.keyPrefix = keyPrefix;
+ this.keyVersion = keyVersion;
}
- public ContainerKeyPrefix(long containerId, String keyPrefix,
- long keyVersion) {
- this.containerId = containerId;
+ public KeyPrefixContainer(String keyPrefix, long keyVersion,
+ long containerId) {
this.keyPrefix = keyPrefix;
this.keyVersion = keyVersion;
- }
-
- public ContainerKeyPrefix(long containerId) {
this.containerId = containerId;
}
- public long getContainerId() {
- return containerId;
+ public KeyPrefixContainer(ContainerKeyPrefix containerKeyPrefix) {
+ this.keyPrefix = containerKeyPrefix.getKeyPrefix();
+ this.keyVersion = containerKeyPrefix.getKeyVersion();
+ this.containerId = containerKeyPrefix.getContainerId();
}
- public void setContainerId(long containerId) {
- this.containerId = containerId;
+ public KeyPrefixContainer(String keyPrefix) {
+ this.keyPrefix = keyPrefix;
}
public String getKeyPrefix() {
@@ -68,13 +68,26 @@ public class ContainerKeyPrefix {
this.keyVersion = keyVersion;
}
+ public long getContainerId() {
+ return containerId;
+ }
+
+ public void setContainerId(long containerId) {
+ this.containerId = containerId;
+ }
+
+ public ContainerKeyPrefix toContainerKeyPrefix() {
+ return new ContainerKeyPrefix(this.containerId,
+ this.keyPrefix, this.keyVersion);
+ }
+
@Override
public boolean equals(Object o) {
- if (!(o instanceof ContainerKeyPrefix)) {
+ if (!(o instanceof KeyPrefixContainer)) {
return false;
}
- ContainerKeyPrefix that = (ContainerKeyPrefix) o;
+ KeyPrefixContainer that = (KeyPrefixContainer) o;
return (this.containerId == that.containerId) &&
this.keyPrefix.equals(that.keyPrefix) &&
this.keyVersion == that.keyVersion;
@@ -82,8 +95,7 @@ public class ContainerKeyPrefix {
@Override
public int hashCode() {
- return Long.valueOf(containerId).hashCode() + 13 * keyPrefix.hashCode() +
- 17 * Long.valueOf(keyVersion).hashCode();
+ return Objects.hash(containerId, keyPrefix, keyVersion);
}
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
index 1fb2fb2856..90cd37daa9 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.recon.api.types.KeyPrefixContainer;
import org.apache.hadoop.ozone.recon.scm.ContainerReplicaHistory;
/**
@@ -235,5 +236,21 @@ public interface ReconContainerMetadataManager {
*/
void commitBatchOperation(RDBBatchOperation rdbBatchOperation)
throws IOException;
+
+ /**
+ * Get iterator to the entire Key_Container DB.
+ * @return TableIterator
+ */
+ TableIterator getKeyContainerTableIterator() throws IOException;
+ /**
+ * Get the stored (key prefix, containerId) mappings whose key prefix
+ * and key version match the given values.
+ *
+ * @param prevKeyPrefix the key prefix to seek to and start scanning.
+ * @param keyVersion the key version to seek.
+ * @return Map of KeyPrefixContainer -> count.
+ */
+ Map<KeyPrefixContainer, Integer> getContainerForKeyPrefixes(
+ String prevKeyPrefix, long keyVersion) throws IOException;
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java
new file mode 100644
index 0000000000..ca5b37ed44
--- /dev/null
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.spi.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Longs;
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.ozone.recon.api.types.KeyPrefixContainer;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import static org.apache.commons.compress.utils.CharsetNames.UTF_8;
+
+/**
+ * Codec to encode KeyPrefixContainer as byte array.
+ */
+public class KeyPrefixContainerCodec implements Codec<KeyPrefixContainer> {
+
+ private static final String KEY_DELIMITER = "_";
+
+ @Override
+ public byte[] toPersistedFormat(KeyPrefixContainer keyPrefixContainer)
+ throws IOException {
+ Preconditions.checkNotNull(keyPrefixContainer,
+ "Null object can't be converted to byte array.");
+ byte[] keyPrefixBytes = keyPrefixContainer.getKeyPrefix().getBytes(UTF_8);
+
+ //Prefix seek can be done only with keyPrefix. In that case, we can
+ // expect the version and the containerId to be undefined.
+ if (keyPrefixContainer.getKeyVersion() != -1) {
+ keyPrefixBytes = ArrayUtils.addAll(keyPrefixBytes, KEY_DELIMITER
+ .getBytes(UTF_8));
+ keyPrefixBytes = ArrayUtils.addAll(keyPrefixBytes, Longs.toByteArray(
+ keyPrefixContainer.getKeyVersion()));
+ }
+
+ if (keyPrefixContainer.getContainerId() != -1) {
+ keyPrefixBytes = ArrayUtils.addAll(keyPrefixBytes, KEY_DELIMITER
+ .getBytes(UTF_8));
+ keyPrefixBytes = ArrayUtils.addAll(keyPrefixBytes, Longs.toByteArray(
+ keyPrefixContainer.getContainerId()));
+ }
+
+ return keyPrefixBytes;
+ }
+
+ @Override
+ public KeyPrefixContainer fromPersistedFormat(byte[] rawData)
+ throws IOException {
+
+ // When reading from byte[], we can always expect to have the key, version
+ // and containerId parts in the byte array.
+ byte[] keyBytes = ArrayUtils.subarray(rawData,
+ 0, rawData.length - Long.BYTES * 2 - 2);
+ String keyPrefix = new String(keyBytes, UTF_8);
+
+ // Second 8 bytes is the key version.
+ byte[] versionBytes = ArrayUtils.subarray(rawData,
+ rawData.length - Long.BYTES * 2 - 1,
+ rawData.length - Long.BYTES - 1);
+ long version = ByteBuffer.wrap(versionBytes).getLong();
+
+ // Last 8 bytes is the containerId.
+ long containerIdFromDB = ByteBuffer.wrap(ArrayUtils.subarray(rawData,
+ rawData.length - Long.BYTES,
+ rawData.length)).getLong();
+ return new KeyPrefixContainer(keyPrefix, version, containerIdFromDB);
+ }
+
+ @Override
+ public KeyPrefixContainer copyObject(KeyPrefixContainer object) {
+ return object;
+ }
+}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
index 1461437679..8e9931798c 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerMetadataManagerImpl.java
@@ -19,12 +19,15 @@
package org.apache.hadoop.ozone.recon.spi.impl;
import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_COUNT_KEY;
+import static
org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition.KEY_CONTAINER;
import static
org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition.REPLICA_HISTORY_V2;
import static
org.apache.hadoop.ozone.recon.spi.impl.ReconDBProvider.truncateTable;
import static
org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition.CONTAINER_KEY;
import static
org.apache.hadoop.ozone.recon.spi.impl.ReconDBDefinition.CONTAINER_KEY_COUNT;
import java.io.IOException;
+import java.time.Duration;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
@@ -41,6 +44,7 @@ import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
import org.apache.hadoop.ozone.recon.ReconUtils;
import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
+import org.apache.hadoop.ozone.recon.api.types.KeyPrefixContainer;
import org.apache.hadoop.ozone.recon.scm.ContainerReplicaHistory;
import org.apache.hadoop.ozone.recon.scm.ContainerReplicaHistoryList;
import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
@@ -65,6 +69,7 @@ public class ReconContainerMetadataManagerImpl
LoggerFactory.getLogger(ReconContainerMetadataManagerImpl.class);
private Table<ContainerKeyPrefix, Integer> containerKeyTable;
+ private Table<KeyPrefixContainer, Integer> keyContainerTable;
private Table<Long, Long> containerKeyCountTable;
private Table<Long, ContainerReplicaHistoryList>
containerReplicaHistoryTable;
@@ -97,13 +102,19 @@ public class ReconContainerMetadataManagerImpl
throws IOException {
// clear and re-init all container-related tables
truncateTable(this.containerKeyTable);
+ truncateTable(this.keyContainerTable);
truncateTable(this.containerKeyCountTable);
initializeTables();
if (containerKeyPrefixCounts != null) {
+ KeyPrefixContainer tmpKeyPrefixContainer;
for (Map.Entry<ContainerKeyPrefix, Integer> entry :
containerKeyPrefixCounts.entrySet()) {
containerKeyTable.put(entry.getKey(), entry.getValue());
+ tmpKeyPrefixContainer = entry.getKey().toKeyPrefixContainer();
+ if (tmpKeyPrefixContainer != null) {
+ keyContainerTable.put(tmpKeyPrefixContainer, entry.getValue());
+ }
}
}
@@ -117,6 +128,12 @@ public class ReconContainerMetadataManagerImpl
private void initializeTables() {
try {
this.containerKeyTable = CONTAINER_KEY.getTable(containerDbStore);
+ this.keyContainerTable = KEY_CONTAINER.getTable(containerDbStore);
+ if (keyContainerTable.isEmpty()) {
+ LOG.info("KEY_CONTAINER Table is empty, " +
+ "initializing from CONTAINER_KEY Table ...");
+ initializeKeyContainerTable();
+ }
this.containerKeyCountTable =
CONTAINER_KEY_COUNT.getTable(containerDbStore);
this.containerReplicaHistoryTable =
@@ -139,6 +156,9 @@ public class ReconContainerMetadataManagerImpl
Integer count)
throws IOException {
containerKeyTable.put(containerKeyPrefix, count);
+ if (containerKeyPrefix.toKeyPrefixContainer() != null) {
+ keyContainerTable.put(containerKeyPrefix.toKeyPrefixContainer(), count);
+ }
}
/**
@@ -156,6 +176,10 @@ public class ReconContainerMetadataManagerImpl
containerKeyPrefix,
Integer count) throws IOException {
containerKeyTable.putWithBatch(batch, containerKeyPrefix, count);
+ if (containerKeyPrefix.toKeyPrefixContainer() != null) {
+ keyContainerTable.putWithBatch(batch,
+ containerKeyPrefix.toKeyPrefixContainer(), count);
+ }
}
/**
@@ -455,6 +479,9 @@ public class ReconContainerMetadataManagerImpl
public void deleteContainerMapping(ContainerKeyPrefix containerKeyPrefix)
throws IOException {
containerKeyTable.delete(containerKeyPrefix);
+ if (!StringUtils.isEmpty(containerKeyPrefix.getKeyPrefix())) {
+ keyContainerTable.delete(containerKeyPrefix.toKeyPrefixContainer());
+ }
}
@Override
@@ -483,6 +510,11 @@ public class ReconContainerMetadataManagerImpl
return containerKeyTable.iterator();
}
+ @Override
+ public TableIterator getKeyContainerTableIterator() {
+ return keyContainerTable.iterator();
+ }
+
/**
* Store the total count of containers into the container DB store.
*
@@ -515,4 +547,87 @@ public class ReconContainerMetadataManagerImpl
throws IOException {
this.containerDbStore.commitBatchOperation(rdbBatchOperation);
}
+
+ /**
+ * Use the DB's prefix seek iterator to start the scan from the given
+ * key prefix and key version.
+ *
+ * @param keyPrefix the given keyPrefix.
+ * @param keyVersion the given keyVersion.
+ * @return Map of (KeyPrefixContainer, Integer).
+ */
+ @Override
+ public Map<KeyPrefixContainer, Integer> getContainerForKeyPrefixes(
+ String keyPrefix, long keyVersion) throws IOException {
+
+ Map<KeyPrefixContainer, Integer> containers = new LinkedHashMap<>();
+ try (TableIterator<KeyPrefixContainer,
+ ? extends KeyValue<KeyPrefixContainer, Integer>> keyIterator =
+ keyContainerTable.iterator()) {
+ KeyPrefixContainer seekKey;
+ if (keyVersion != -1) {
+ seekKey = new KeyPrefixContainer(keyPrefix, keyVersion);
+ } else {
+ seekKey = new KeyPrefixContainer(keyPrefix);
+ }
+ KeyValue<KeyPrefixContainer, Integer> seekKeyValue =
+ keyIterator.seek(seekKey);
+
+ // check if RocksDB was able to seek correctly to the given key prefix
+ // if not, then return empty result
+ // In case of an empty prevKeyPrefix, all the keys in the container are
+ // returned
+ if (seekKeyValue == null ||
+ (keyVersion != -1 &&
+ seekKeyValue.getKey().getKeyVersion() != keyVersion)) {
+ return containers;
+ }
+
+ while (keyIterator.hasNext()) {
+ KeyValue<KeyPrefixContainer, Integer> keyValue = keyIterator.next();
+ KeyPrefixContainer keyPrefixContainer = keyValue.getKey();
+
+ // The prefix seek only guarantees that the iterator's head will be
+ // positioned at the first prefix match. We still have to check the key
+ // prefix.
+ if (keyPrefixContainer.getKeyPrefix().equals(keyPrefix)) {
+ if (keyPrefixContainer.getContainerId() != -1 &&
+ (keyVersion == -1 ||
+ keyPrefixContainer.getKeyVersion() == keyVersion)) {
+ containers.put(new KeyPrefixContainer(keyPrefix,
+ keyPrefixContainer.getKeyVersion(),
+ keyPrefixContainer.getContainerId()),
+ keyValue.getValue());
+ } else {
+ LOG.warn("Null container returned for keyPrefix = {}," +
+ " keyVersion = {} ", keyPrefix, keyVersion);
+ }
+ } else {
+ // Break on first mismatch
+ break;
+ }
+ }
+ }
+ return containers;
+ }
+
+ private void initializeKeyContainerTable() throws IOException {
+ Instant start = Instant.now();
+ long count = 0;
+ // Close the iterator when done to avoid leaking the underlying
+ // RocksDB resources.
+ try (TableIterator<ContainerKeyPrefix,
+ ? extends KeyValue<ContainerKeyPrefix, Integer>> iterator =
+ containerKeyTable.iterator()) {
+ while (iterator.hasNext()) {
+ KeyValue<ContainerKeyPrefix, Integer> keyValue = iterator.next();
+ ContainerKeyPrefix containerKeyPrefix = keyValue.getKey();
+ if (!StringUtils.isEmpty(containerKeyPrefix.getKeyPrefix()) &&
+ containerKeyPrefix.getContainerId() != -1) {
+ // Carry over the stored count rather than a hard-coded 1, to stay
+ // consistent with the other write paths into keyContainerTable.
+ keyContainerTable.put(containerKeyPrefix.toKeyPrefixContainer(),
+ keyValue.getValue());
+ }
+ count++;
+ }
+ }
+ long duration = Duration.between(start, Instant.now()).toMillis();
+ LOG.info("It took {} seconds to initialize {} records" +
+ " to KEY_CONTAINER table", (double) duration / 1000, count);
+ }
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java
index 845e6b20b9..3d105c70d3 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.db.IntegerCodec;
import org.apache.hadoop.hdds.utils.db.LongCodec;
import org.apache.hadoop.ozone.recon.ReconServerConfigKeys;
import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
+import org.apache.hadoop.ozone.recon.api.types.KeyPrefixContainer;
import org.apache.hadoop.ozone.recon.codec.ContainerReplicaHistoryListCodec;
import org.apache.hadoop.ozone.recon.codec.NSSummaryCodec;
import org.apache.hadoop.ozone.recon.scm.ContainerReplicaHistoryList;
@@ -48,6 +49,15 @@ public class ReconDBDefinition implements DBDefinition {
Integer.class,
new IntegerCodec());
+ public static final DBColumnFamilyDefinition<KeyPrefixContainer, Integer>
+ KEY_CONTAINER =
+ new DBColumnFamilyDefinition<>(
+ "keyContainerTable",
+ KeyPrefixContainer.class,
+ new KeyPrefixContainerCodec(),
+ Integer.class,
+ new IntegerCodec());
+
public static final DBColumnFamilyDefinition<Long, Long>
CONTAINER_KEY_COUNT =
new DBColumnFamilyDefinition<>(
@@ -97,7 +107,7 @@ public class ReconDBDefinition implements DBDefinition {
@Override
public DBColumnFamilyDefinition[] getColumnFamilies() {
return new DBColumnFamilyDefinition[] {
- CONTAINER_KEY, CONTAINER_KEY_COUNT, REPLICA_HISTORY,
+ CONTAINER_KEY, KEY_CONTAINER, CONTAINER_KEY_COUNT, REPLICA_HISTORY,
NAMESPACE_SUMMARY, REPLICA_HISTORY_V2};
}
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
index 72fd26296f..01fcdaf765 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
+import org.apache.hadoop.ozone.recon.api.types.KeyPrefixContainer;
import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
@@ -252,18 +253,24 @@ public class ContainerKeyMapperTask implements
ReconOmTask {
List<ContainerKeyPrefix>
deletedContainerKeyList)
throws IOException {
+
Set<ContainerKeyPrefix> keysToBeDeleted = new HashSet<>();
- try (TableIterator<ContainerKeyPrefix,
- ? extends Table.KeyValue<ContainerKeyPrefix, Integer>>
containerIterator
- = reconContainerMetadataManager.getContainerTableIterator()) {
+ try (TableIterator<KeyPrefixContainer, ? extends
+ Table.KeyValue<KeyPrefixContainer, Integer>> keyContainerIterator =
+ reconContainerMetadataManager.getKeyContainerTableIterator()) {
// Check if we have keys in this container in the DB
- while (containerIterator.hasNext()) {
- Table.KeyValue<ContainerKeyPrefix, Integer> keyValue =
- containerIterator.next();
+ keyContainerIterator.seek(new KeyPrefixContainer(key));
+ while (keyContainerIterator.hasNext()) {
+ Table.KeyValue<KeyPrefixContainer, Integer> keyValue =
+ keyContainerIterator.next();
String keyPrefix = keyValue.getKey().getKeyPrefix();
if (keyPrefix.equals(key)) {
- keysToBeDeleted.add(keyValue.getKey());
+ if (keyValue.getKey().getContainerId() != -1) {
+ keysToBeDeleted.add(keyValue.getKey().toContainerKeyPrefix());
+ }
+ } else {
+ break;
}
}
}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerMetadataManagerImpl.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerMetadataManagerImpl.java
index 66afc349c8..d0184df9be 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerMetadataManagerImpl.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerMetadataManagerImpl.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
import org.apache.hadoop.ozone.recon.ReconTestInjector;
import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
+import org.apache.hadoop.ozone.recon.api.types.KeyPrefixContainer;
import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
import org.junit.Assert;
import org.junit.Before;
@@ -321,6 +322,30 @@ public class TestReconContainerMetadataManagerImpl {
assertEquals(0, keyPrefixMap.size());
}
+ @Test
+ public void testGetContainerForKeyPrefixesWithKeyPrefix() throws Exception {
+ long containerId = 1L;
+ long nextContainerId = 2L;
+ populateKeysInContainers(containerId, nextContainerId);
+
+ Map<KeyPrefixContainer, Integer> keyPrefixMap =
+ reconContainerMetadataManager.getContainerForKeyPrefixes(keyPrefix1,
0);
+ assertEquals(1, keyPrefixMap.size());
+
+ keyPrefixMap = reconContainerMetadataManager.getContainerForKeyPrefixes(
+ keyPrefix3, 0);
+ assertEquals(1, keyPrefixMap.size());
+
+ keyPrefixMap = reconContainerMetadataManager.getContainerForKeyPrefixes(
+ keyPrefix3, 2);
+ assertEquals(0, keyPrefixMap.size());
+
+ // test for negative cases
+ keyPrefixMap = reconContainerMetadataManager.getContainerForKeyPrefixes(
+ "V3/B1/invalid", -1);
+ assertEquals(0, keyPrefixMap.size());
+ }
+
@Test
public void testGetContainersWithPrevContainer() throws Exception {
long containerId = 1L;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]