This is an automated email from the ASF dual-hosted git repository.
sodonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new a7e301472f HDDS-11660. Recon List Key API: Reduce object creation and
buffering memory (#7405)
a7e301472f is described below
commit a7e301472f7cb95a48ceb7ffc1dcec7ca40c8685
Author: Stephen O'Donnell <[email protected]>
AuthorDate: Fri Nov 8 10:00:02 2024 +0000
HDDS-11660. Recon List Key API: Reduce object creation and buffering memory
(#7405)
---
.../org/apache/hadoop/ozone/recon/ReconUtils.java | 33 ++++-
.../ozone/recon/api/OMDBInsightEndpoint.java | 95 ++++++--------
.../recon/api/types/KeyEntityInfoProtoWrapper.java | 145 +++++++++++++++++++++
.../ozone/recon/api/types/ListKeysResponse.java | 6 +-
.../recon/recovery/ReconOMMetadataManager.java | 12 ++
.../recon/recovery/ReconOmMetadataManagerImpl.java | 9 ++
.../ozone/recon/api/TestOmDBInsightEndPoint.java | 18 +--
7 files changed, 244 insertions(+), 74 deletions(-)
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index f65e2f30cb..88418baffa 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -291,11 +291,34 @@ public class ReconUtils {
*/
public static String constructFullPath(OmKeyInfo omKeyInfo,
ReconNamespaceSummaryManager
reconNamespaceSummaryManager,
- ReconOMMetadataManager
omMetadataManager)
- throws IOException {
+ ReconOMMetadataManager
omMetadataManager) throws IOException {
+ return constructFullPath(omKeyInfo.getKeyName(),
omKeyInfo.getParentObjectID(), omKeyInfo.getVolumeName(),
+ omKeyInfo.getBucketName(), reconNamespaceSummaryManager,
omMetadataManager);
+ }
- StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName());
- long parentId = omKeyInfo.getParentObjectID();
+ /**
+ * Constructs the full path of a key from its key name and parent ID using a
bottom-up approach, starting from the
+ * leaf node.
+ *
+ * The method begins with the leaf node (the key itself) and iteratively
prepends parent directory names, fetched
+ * via NSSummary objects, until reaching the parent bucket (parentId is -1).
It effectively builds the path from
+ * bottom to top, finally prepending the volume and bucket names to complete
the full path. If the directory structure
+ * is currently being rebuilt (indicated by the rebuildTriggered flag), this
method returns an empty string to signify
+ * that path construction is temporarily unavailable.
+ *
+ * @param keyName The name of the key
+ * @param initialParentId The parent ID of the key
+ * @param volumeName The name of the volume
+ * @param bucketName The name of the bucket
+ * @return The constructed full path of the key as a String, or an empty
string if a rebuild is in progress and
+ * the path cannot be constructed at this time.
+ * @throws IOException if an I/O error occurs while reading NSSummary or OM metadata
+ */
+ public static String constructFullPath(String keyName, long initialParentId,
String volumeName, String bucketName,
+ ReconNamespaceSummaryManager
reconNamespaceSummaryManager,
+ ReconOMMetadataManager
omMetadataManager) throws IOException {
+ StringBuilder fullPath = new StringBuilder(keyName);
+ long parentId = initialParentId;
boolean isDirectoryPresent = false;
while (parentId != 0) {
@@ -320,8 +343,6 @@ public class ReconUtils {
}
// Prepend the volume and bucket to the constructed path
- String volumeName = omKeyInfo.getVolumeName();
- String bucketName = omKeyInfo.getBucketName();
fullPath.insert(0, volumeName + OM_KEY_PREFIX + bucketName +
OM_KEY_PREFIX);
if (isDirectoryPresent) {
return OmUtils.normalizeKey(fullPath.toString(), true);
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index 8611abe88c..21c9552c03 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.recon.ReconResponseUtils;
import org.apache.hadoop.ozone.recon.ReconUtils;
import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler;
import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper;
import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
import org.apache.hadoop.ozone.recon.api.types.ListKeysResponse;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
@@ -58,7 +59,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
-import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -989,7 +989,7 @@ public class OMDBInsightEndpoint {
listKeysResponse = (ListKeysResponse) response.getEntity();
}
- List<KeyEntityInfo> keyInfoList = listKeysResponse.getKeys();
+ List<KeyEntityInfoProtoWrapper> keyInfoList = listKeysResponse.getKeys();
if (!keyInfoList.isEmpty()) {
listKeysResponse.setLastKey(keyInfoList.get(keyInfoList.size() -
1).getKey());
}
@@ -1003,66 +1003,49 @@ public class OMDBInsightEndpoint {
listKeysResponse.setPath(paramInfo.getStartPrefix());
long replicatedTotal = 0;
long unreplicatedTotal = 0;
- boolean keysFound = false; // Flag to track if any keys are found
// Search keys from non-FSO layout.
- Map<String, OmKeyInfo> obsKeys;
- Table<String, OmKeyInfo> keyTable =
- omMetadataManager.getKeyTable(BucketLayout.LEGACY);
- obsKeys = retrieveKeysFromTable(keyTable, paramInfo);
- for (Map.Entry<String, OmKeyInfo> entry : obsKeys.entrySet()) {
- keysFound = true;
- KeyEntityInfo keyEntityInfo =
- createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue());
-
- listKeysResponse.getKeys().add(keyEntityInfo);
- replicatedTotal += entry.getValue().getReplicatedSize();
- unreplicatedTotal += entry.getValue().getDataSize();
- }
+ Table<String, KeyEntityInfoProtoWrapper> keyTable =
+ omMetadataManager.getKeyTableLite(BucketLayout.LEGACY);
+ retrieveKeysFromTable(keyTable, paramInfo, listKeysResponse.getKeys());
+
// Search keys from FSO layout.
- Map<String, OmKeyInfo> fsoKeys = searchKeysInFSO(paramInfo);
- for (Map.Entry<String, OmKeyInfo> entry : fsoKeys.entrySet()) {
- keysFound = true;
- KeyEntityInfo keyEntityInfo =
- createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue());
-
- listKeysResponse.getKeys().add(keyEntityInfo);
- replicatedTotal += entry.getValue().getReplicatedSize();
- unreplicatedTotal += entry.getValue().getDataSize();
- }
+ searchKeysInFSO(paramInfo, listKeysResponse.getKeys());
// If no keys were found, return a response indicating that no keys
matched
- if (!keysFound) {
+ if (listKeysResponse.getKeys().isEmpty()) {
return
ReconResponseUtils.noMatchedKeysResponse(paramInfo.getStartPrefix());
}
+ for (KeyEntityInfoProtoWrapper keyEntityInfo :
listKeysResponse.getKeys()) {
+ replicatedTotal += keyEntityInfo.getReplicatedSize();
+ unreplicatedTotal += keyEntityInfo.getSize();
+ }
+
// Set the aggregated totals in the response
listKeysResponse.setReplicatedDataSize(replicatedTotal);
listKeysResponse.setUnReplicatedDataSize(unreplicatedTotal);
return Response.ok(listKeysResponse).build();
- } catch (IOException e) {
- return ReconResponseUtils.createInternalServerErrorResponse(
- "Error listing keys from OM DB: " + e.getMessage());
} catch (RuntimeException e) {
+ LOG.error("Error generating listKeys response", e);
return ReconResponseUtils.createInternalServerErrorResponse(
"Unexpected runtime error while searching keys in OM DB: " +
e.getMessage());
} catch (Exception e) {
+ LOG.error("Error generating listKeys response", e);
return ReconResponseUtils.createInternalServerErrorResponse(
"Error listing keys from OM DB: " + e.getMessage());
}
}
- public Map<String, OmKeyInfo> searchKeysInFSO(ParamInfo paramInfo)
+ public void searchKeysInFSO(ParamInfo paramInfo,
List<KeyEntityInfoProtoWrapper> results)
throws IOException {
- int originalLimit = paramInfo.getLimit();
- Map<String, OmKeyInfo> matchedKeys = new LinkedHashMap<>();
// Convert the search prefix to an object path for FSO buckets
String startPrefixObjectPath =
convertStartPrefixPathToObjectIdPath(paramInfo.getStartPrefix());
String[] names = parseRequestPath(startPrefixObjectPath);
- Table<String, OmKeyInfo> fileTable =
- omMetadataManager.getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED);
+ Table<String, KeyEntityInfoProtoWrapper> fileTable =
+ omMetadataManager.getKeyTableLite(BucketLayout.FILE_SYSTEM_OPTIMIZED);
// If names.length > 2, then the search prefix is at the level above
bucket level hence
// no need to find parent or extract id's or find subpaths as the
fileTable is
@@ -1075,7 +1058,7 @@ public class OMDBInsightEndpoint {
NSSummary parentSummary =
reconNamespaceSummaryManager.getNSSummary(parentId);
if (parentSummary == null) {
- return matchedKeys;
+ return;
}
List<String> subPaths = new ArrayList<>();
// Add the initial search prefix object path because it can have both
files and subdirectories with files.
@@ -1087,21 +1070,17 @@ public class OMDBInsightEndpoint {
// Iterate over the subpaths and retrieve the files
for (String subPath : subPaths) {
paramInfo.setStartPrefix(subPath);
- matchedKeys.putAll(
- retrieveKeysFromTable(fileTable, paramInfo));
- paramInfo.setLimit(originalLimit - matchedKeys.size());
- if (matchedKeys.size() >= originalLimit) {
+ retrieveKeysFromTable(fileTable, paramInfo, results);
+ if (results.size() >= paramInfo.getLimit()) {
break;
}
}
- return matchedKeys;
+ return;
}
paramInfo.setStartPrefix(startPrefixObjectPath);
// Iterate over for bucket and volume level search
- matchedKeys.putAll(
- retrieveKeysFromTable(fileTable, paramInfo));
- return matchedKeys;
+ retrieveKeysFromTable(fileTable, paramInfo, results);
}
@@ -1174,32 +1153,31 @@ public class OMDBInsightEndpoint {
* @return A map of keys and their corresponding OmKeyInfo objects.
* @throws IOException If there are problems accessing the table.
*/
- private Map<String, OmKeyInfo> retrieveKeysFromTable(
- Table<String, OmKeyInfo> table, ParamInfo paramInfo)
+ private void retrieveKeysFromTable(
+ Table<String, KeyEntityInfoProtoWrapper> table, ParamInfo paramInfo,
List<KeyEntityInfoProtoWrapper> results)
throws IOException {
boolean skipPrevKey = false;
String seekKey = paramInfo.getPrevKey();
- Map<String, OmKeyInfo> matchedKeys = new LinkedHashMap<>();
try (
- TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
keyIter = table.iterator()) {
+ TableIterator<String, ? extends Table.KeyValue<String,
KeyEntityInfoProtoWrapper>> keyIter = table.iterator()) {
if (!paramInfo.isSkipPrevKeyDone() && isNotBlank(seekKey)) {
skipPrevKey = true;
- Table.KeyValue<String, OmKeyInfo> seekKeyValue =
+ Table.KeyValue<String, KeyEntityInfoProtoWrapper> seekKeyValue =
keyIter.seek(seekKey);
// check if RocksDB was able to seek correctly to the given key prefix
// if not, then return empty result
// In case of an empty prevKeyPrefix, all the keys are returned
if (seekKeyValue == null ||
(!seekKeyValue.getKey().equals(paramInfo.getPrevKey()))) {
- return matchedKeys;
+ return;
}
} else {
keyIter.seek(paramInfo.getStartPrefix());
}
while (keyIter.hasNext()) {
- Table.KeyValue<String, OmKeyInfo> entry = keyIter.next();
+ Table.KeyValue<String, KeyEntityInfoProtoWrapper> entry =
keyIter.next();
String dbKey = entry.getKey();
if (!dbKey.startsWith(paramInfo.getStartPrefix())) {
break; // Exit the loop if the key no longer matches the prefix
@@ -1209,9 +1187,14 @@ public class OMDBInsightEndpoint {
continue;
}
if (applyFilters(entry, paramInfo)) {
- matchedKeys.put(dbKey, entry.getValue());
+ KeyEntityInfoProtoWrapper keyEntityInfo = entry.getValue();
+ keyEntityInfo.setKey(dbKey);
+
keyEntityInfo.setPath(ReconUtils.constructFullPath(keyEntityInfo.getKeyName(),
keyEntityInfo.getParentId(),
+ keyEntityInfo.getVolumeName(), keyEntityInfo.getBucketName(),
reconNamespaceSummaryManager,
+ omMetadataManager));
+ results.add(keyEntityInfo);
paramInfo.setLastKey(dbKey);
- if (matchedKeys.size() >= paramInfo.getLimit()) {
+ if (results.size() >= paramInfo.getLimit()) {
break;
}
}
@@ -1220,10 +1203,10 @@ public class OMDBInsightEndpoint {
LOG.error("Error retrieving keys from table for path: {}",
paramInfo.getStartPrefix(), exception);
throw exception;
}
- return matchedKeys;
}
- private boolean applyFilters(Table.KeyValue<String, OmKeyInfo> entry,
ParamInfo paramInfo) throws IOException {
+ private boolean applyFilters(Table.KeyValue<String,
KeyEntityInfoProtoWrapper> entry, ParamInfo paramInfo)
+ throws IOException {
LOG.debug("Applying filters on : {}", entry.getKey());
@@ -1238,7 +1221,7 @@ public class OMDBInsightEndpoint {
return false;
}
- return entry.getValue().getDataSize() >= paramInfo.getKeySize();
+ return entry.getValue().getSize() >= paramInfo.getKeySize();
}
/**
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java
new file mode 100644
index 0000000000..b61ebf9963
--- /dev/null
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api.types;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto2Codec;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+
+/**
+ * POJO object wrapper for metadata of a given key/file. This class wraps a
KeyInfo protobuf
+ * object and delegates most accessors to it.
+ */
+public final class KeyEntityInfoProtoWrapper {
+
+ public static Codec<KeyEntityInfoProtoWrapper> getCodec() {
+ return new DelegatedCodec<>(
+
Proto2Codec.get(OzoneManagerProtocolProtos.KeyInfo.getDefaultInstance()),
+ KeyEntityInfoProtoWrapper::getFromProtobuf,
+ KeyEntityInfoProtoWrapper::toProtobuf,
+ KeyEntityInfoProtoWrapper.class);
+ }
+
+ private final OzoneManagerProtocolProtos.KeyInfo keyInfoProto;
+
+ /** This is the key table key of RocksDB and will help the UI to implement pagination,
+ * where the UI will use the last record key to send in the API as prevKeyPrefix. */
+ @JsonProperty("key")
+ private String key;
+
+ /** Path of a key/file. */
+ @JsonProperty("path")
+ private String path;
+
+ @JsonProperty("replicatedSize")
+ private final long replicatedSize;
+
+ @JsonProperty("replicationInfo")
+ private final ReplicationConfig replicationConfig;
+
+ private KeyEntityInfoProtoWrapper(OzoneManagerProtocolProtos.KeyInfo proto) {
+ keyInfoProto = proto;
+ replicationConfig = ReplicationConfig.fromProto(proto.getType(),
proto.getFactor(),
+ proto.getEcReplicationConfig());
+ this.replicatedSize = QuotaUtil.getReplicatedSize(getSize(),
getReplicationConfig());
+ }
+
+ public static KeyEntityInfoProtoWrapper
getFromProtobuf(OzoneManagerProtocolProtos.KeyInfo keyInfo) {
+ return new KeyEntityInfoProtoWrapper(keyInfo);
+ }
+
+ public OzoneManagerProtocolProtos.KeyInfo toProtobuf() {
+ throw new UnsupportedOperationException("This method is not supported.");
+ }
+
+ @JsonProperty("key")
+ public String getKey() {
+ if (key == null) {
+ throw new IllegalStateException("Key must be set to correctly serialize
this object.");
+ }
+ return key;
+ }
+
+ public void setKey(String key) {
+ this.key = key;
+ }
+
+ @JsonProperty("path")
+ public String getPath() {
+ if (path == null) {
+ throw new IllegalStateException("Path must be set to correctly serialize
this object.");
+ }
+ return path;
+ }
+
+ public void setPath(String path) {
+ this.path = path;
+ }
+
+ @JsonProperty("size")
+ public long getSize() {
+ return keyInfoProto.getDataSize();
+ }
+
+ @JsonProperty("replicatedSize")
+ public long getReplicatedSize() {
+ return replicatedSize;
+ }
+
+ @JsonProperty("replicationInfo")
+ public ReplicationConfig getReplicationConfig() {
+ return replicationConfig;
+ }
+
+ @JsonProperty("creationTime")
+ public long getCreationTime() {
+ return keyInfoProto.getCreationTime();
+ }
+
+ @JsonProperty("modificationTime")
+ public long getModificationTime() {
+ return keyInfoProto.getModificationTime();
+ }
+
+ @JsonProperty("isKey")
+ public boolean isKey() {
+ return keyInfoProto.getIsFile();
+ }
+
+ public long getParentId() {
+ return keyInfoProto.getParentID();
+ }
+
+ public String getVolumeName() {
+ return keyInfoProto.getVolumeName();
+ }
+
+ public String getBucketName() {
+ return keyInfoProto.getBucketName();
+ }
+
+ /** Returns the key name of the key stored in the OM Key Info object. */
+ public String getKeyName() {
+ return keyInfoProto.getKeyName();
+ }
+}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java
index 7220060aeb..2770e7f7f6 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java
@@ -51,7 +51,7 @@ public class ListKeysResponse {
/** list of keys. */
@JsonProperty("keys")
- private List<KeyEntityInfo> keys;
+ private List<KeyEntityInfoProtoWrapper> keys;
public ListKeysResponse() {
@@ -95,11 +95,11 @@ public class ListKeysResponse {
this.path = path;
}
- public List<KeyEntityInfo> getKeys() {
+ public List<KeyEntityInfoProtoWrapper> getKeys() {
return keys;
}
- public void setKeys(List<KeyEntityInfo> keys) {
+ public void setKeys(List<KeyEntityInfoProtoWrapper> keys) {
this.keys = keys;
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
index 14ae997073..82913f453d 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
@@ -23,9 +23,12 @@ import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper;
/**
* Interface for the OM Metadata Manager + DB store maintained by
@@ -113,4 +116,13 @@ public interface ReconOMMetadataManager extends
OMMetadataManager {
*/
OzoneConfiguration getOzoneConfiguration();
+ /**
+ * A lighter weight version of the getKeyTable method that only returns the
KeyEntityInfo wrapper object. This
+ * avoids creating a full OMKeyInfo object for each key if it is not needed.
+ * @param bucketLayout The Bucket layout to use for the key table.
+ * @return A table of keys and their metadata.
+ * @throws IOException if the table cannot be retrieved from the DB store
+ */
+ Table<String, KeyEntityInfoProtoWrapper> getKeyTableLite(BucketLayout
bucketLayout) throws IOException;
+
}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
index 91cb61369f..f750a0abb6 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
@@ -41,9 +41,11 @@ import org.apache.hadoop.hdds.utils.db.cache.TableCache;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper;
import org.eclipse.jetty.util.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -97,6 +99,7 @@ public class ReconOmMetadataManagerImpl extends
OmMetadataManagerImpl
.setName(dbFile.getName())
.setPath(dbFile.toPath().getParent());
addOMTablesAndCodecs(dbStoreBuilder);
+ dbStoreBuilder.addCodec(KeyEntityInfoProtoWrapper.class,
KeyEntityInfoProtoWrapper.getCodec());
setStore(dbStoreBuilder.build());
LOG.info("Created OM DB handle from snapshot at {}.",
dbFile.getAbsolutePath());
@@ -109,6 +112,12 @@ public class ReconOmMetadataManagerImpl extends
OmMetadataManagerImpl
}
}
+ @Override
+ public Table<String, KeyEntityInfoProtoWrapper> getKeyTableLite(BucketLayout
bucketLayout) throws IOException {
+ String tableName = bucketLayout.isFileSystemOptimized() ? FILE_TABLE :
KEY_TABLE;
+ return getStore().getTable(tableName, String.class,
KeyEntityInfoProtoWrapper.class);
+ }
+
@Override
public void updateOmDB(File newDbLocation) throws IOException {
if (getStore() != null) {
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
index a1e8585401..398d494ea0 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.recon.ReconTestInjector;
import org.apache.hadoop.ozone.recon.ReconUtils;
-import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper;
import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
import org.apache.hadoop.ozone.recon.api.types.ListKeysResponse;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
@@ -1579,7 +1579,7 @@ public class TestOmDBInsightEndPoint extends
AbstractReconSqlDBTest {
"", 1000);
ListKeysResponse listKeysResponse = (ListKeysResponse)
bucketResponse.getEntity();
assertEquals(6, listKeysResponse.getKeys().size());
- KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+ KeyEntityInfoProtoWrapper keyEntityInfo =
listKeysResponse.getKeys().get(0);
assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath());
assertEquals("/1/10/11/file1", keyEntityInfo.getKey());
assertEquals("/1/10/13/testfile", listKeysResponse.getLastKey());
@@ -1611,7 +1611,7 @@ public class TestOmDBInsightEndPoint extends
AbstractReconSqlDBTest {
"", 2);
ListKeysResponse listKeysResponse = (ListKeysResponse)
bucketResponse.getEntity();
assertEquals(2, listKeysResponse.getKeys().size());
- KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+ KeyEntityInfoProtoWrapper keyEntityInfo =
listKeysResponse.getKeys().get(0);
assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath());
assertEquals("/1/10/11/testfile", listKeysResponse.getLastKey());
assertEquals("RATIS",
keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1653,7 +1653,7 @@ public class TestOmDBInsightEndPoint extends
AbstractReconSqlDBTest {
"", 2);
ListKeysResponse listKeysResponse = (ListKeysResponse)
bucketResponse.getEntity();
assertEquals(2, listKeysResponse.getKeys().size());
- KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+ KeyEntityInfoProtoWrapper keyEntityInfo =
listKeysResponse.getKeys().get(0);
assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath());
assertEquals("/1/10/11/testfile", listKeysResponse.getLastKey());
assertEquals("RATIS",
keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1695,7 +1695,7 @@ public class TestOmDBInsightEndPoint extends
AbstractReconSqlDBTest {
"", 1);
ListKeysResponse listKeysResponse = (ListKeysResponse)
bucketResponse.getEntity();
assertEquals(1, listKeysResponse.getKeys().size());
- KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+ KeyEntityInfoProtoWrapper keyEntityInfo =
listKeysResponse.getKeys().get(0);
assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath());
assertEquals("/1/10/11/file1", listKeysResponse.getLastKey());
assertEquals("RATIS",
keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1746,7 +1746,7 @@ public class TestOmDBInsightEndPoint extends
AbstractReconSqlDBTest {
"", 3);
ListKeysResponse listKeysResponse = (ListKeysResponse)
bucketResponse.getEntity();
assertEquals(3, listKeysResponse.getKeys().size());
- KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+ KeyEntityInfoProtoWrapper keyEntityInfo =
listKeysResponse.getKeys().get(0);
assertEquals("volume1/fso-bucket2/dir8/file1", keyEntityInfo.getPath());
assertEquals("/1/30/32/file1", listKeysResponse.getLastKey());
assertEquals("RATIS",
keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1779,7 +1779,7 @@ public class TestOmDBInsightEndPoint extends
AbstractReconSqlDBTest {
"", 2);
ListKeysResponse listKeysResponse = (ListKeysResponse)
bucketResponse.getEntity();
assertEquals(2, listKeysResponse.getKeys().size());
- KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+ KeyEntityInfoProtoWrapper keyEntityInfo =
listKeysResponse.getKeys().get(0);
assertEquals("volume1/fso-bucket/dir1/dir2/file1",
keyEntityInfo.getPath());
assertEquals("/1/10/12/testfile", listKeysResponse.getLastKey());
assertEquals("RATIS",
keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1812,7 +1812,7 @@ public class TestOmDBInsightEndPoint extends
AbstractReconSqlDBTest {
"", 2);
ListKeysResponse listKeysResponse = (ListKeysResponse)
bucketResponse.getEntity();
assertEquals(2, listKeysResponse.getKeys().size());
- KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+ KeyEntityInfoProtoWrapper keyEntityInfo =
listKeysResponse.getKeys().get(0);
assertEquals("volume1/fso-bucket/dir1/dir2/dir3/file1",
keyEntityInfo.getPath());
assertEquals("/1/10/13/testfile", listKeysResponse.getLastKey());
assertEquals("RATIS",
keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1899,7 +1899,7 @@ public class TestOmDBInsightEndPoint extends
AbstractReconSqlDBTest {
"", 2);
ListKeysResponse listKeysResponse = (ListKeysResponse)
bucketResponse.getEntity();
assertEquals(2, listKeysResponse.getKeys().size());
- KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+ KeyEntityInfoProtoWrapper keyEntityInfo =
listKeysResponse.getKeys().get(0);
assertEquals("volume1/obs-bucket/key1", keyEntityInfo.getPath());
assertEquals("/volume1/obs-bucket/key1/key2",
listKeysResponse.getLastKey());
assertEquals("RATIS",
keyEntityInfo.getReplicationConfig().getReplicationType().toString());
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]