peterxcli commented on code in PR #7817:
URL: https://github.com/apache/ozone/pull/7817#discussion_r1957644690
##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java:
##########
@@ -1935,46 +1938,78 @@ public <KEY, VALUE> long
countEstimatedRowsInTable(Table<KEY, VALUE> table)
}
@Override
- public Set<String> getMultipartUploadKeys(
- String volumeName, String bucketName, String prefix) throws IOException {
+ public MultipartUploadKeys getMultipartUploadKeys(
+ String volumeName, String bucketName, String prefix, String keyMarker,
+ String uploadIdMarker, int maxUploads) throws IOException {
- Set<String> response = new TreeSet<>();
- Set<String> aborted = new TreeSet<>();
-
- Iterator<Map.Entry<CacheKey<String>, CacheValue<OmMultipartKeyInfo>>>
- cacheIterator = getMultipartInfoTable().cacheIterator();
+ MultipartUploadKeys.Builder resultBuilder =
MultipartUploadKeys.newBuilder();
+ SortedSet<String> responseKeys = new TreeSet<>();
+ Set<String> aborted = new HashSet<>();
String prefixKey =
OmMultipartUpload.getDbKey(volumeName, bucketName, prefix);
+ if (StringUtil.isNotBlank(keyMarker)) {
+ prefix = keyMarker;
+ if (StringUtil.isNotBlank(uploadIdMarker)) {
+ prefix = prefix + OM_KEY_PREFIX + uploadIdMarker;
+ }
+ }
+ String seekKey = OmMultipartUpload.getDbKey(volumeName, bucketName,
prefix);
+
+ Iterator<Map.Entry<CacheKey<String>, CacheValue<OmMultipartKeyInfo>>>
+ cacheIterator = getMultipartInfoTable().cacheIterator();
// First iterate all the entries in cache.
while (cacheIterator.hasNext()) {
Map.Entry<CacheKey<String>, CacheValue<OmMultipartKeyInfo>> cacheEntry =
cacheIterator.next();
- if (cacheEntry.getKey().getCacheKey().startsWith(prefixKey)) {
+ String cacheKey = cacheEntry.getKey().getCacheKey();
+ if (cacheKey.startsWith(prefixKey)) {
// Check if it is marked for delete, due to abort mpu
- if (cacheEntry.getValue().getCacheValue() != null) {
- response.add(cacheEntry.getKey().getCacheKey());
+ if (cacheEntry.getValue().getCacheValue() != null &&
+ cacheKey.compareTo(seekKey) >= 0) {
+ responseKeys.add(cacheKey);
} else {
- aborted.add(cacheEntry.getKey().getCacheKey());
+ aborted.add(cacheKey);
}
}
}
- // prefixed iterator will only iterate until keys match the prefix
+ int dbKeysCount = 0;
+ // the prefix iterator will only iterate keys that match the given prefix
+ // so we don't need to check if the key is started with prefixKey again
try (TableIterator<String, ? extends KeyValue<String, OmMultipartKeyInfo>>
iterator = getMultipartInfoTable().iterator(prefixKey)) {
+ iterator.seek(seekKey);
- while (iterator.hasNext()) {
+ while (iterator.hasNext() && dbKeysCount < maxUploads + 1) {
Review Comment:
The condition `dbKeysCount < maxUploads + 1` limits the number of db keys to
at most `maxUploads + 1`: when `dbKeysCount == maxUploads`, the loop body
still executes one more time, so one key beyond `maxUploads` is read. Is that
extra read intentional — e.g. to detect whether the listing is truncated and
to compute the next key/upload-id markers? If so, `dbKeysCount <= maxUploads`
plus a short comment explaining the look-ahead would make the intent clearer;
if not, the bound should be `dbKeysCount < maxUploads`.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]