This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new bf29f7ffb7 HDDS-13235. The equals/hashCode methods in anonymous KeyValue classes may not work. (#8607)
bf29f7ffb7 is described below

commit bf29f7ffb75cb6b1a0b328463cd58e0f7b972aa7
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Thu Jun 12 14:00:34 2025 -0700

    HDDS-13235. The equals/hashCode methods in anonymous KeyValue classes may not work. (#8607)
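
For context on the bug being fixed: the old code defined equals/hashCode only
inside the anonymous classes returned by Table.newKeyValue, while the other
KeyValue implementations removed in this commit (TypedKeyValue,
UnprefixedKeyValue, RawKeyValue) inherited Object.equals. Mixing the two breaks
the symmetry the equals contract requires. A standalone sketch of that failure
mode, with hypothetical names rather than Ozone code:

    import java.util.Objects;

    interface Pair {
      String key();
    }

    public class SymmetryDemo {
      // Like the old anonymous Table.newKeyValue classes: overrides equals.
      static Pair withEquals(String k) {
        return new Pair() {
          @Override public String key() { return k; }
          @Override public boolean equals(Object o) {
            return o instanceof Pair && key().equals(((Pair) o).key());
          }
          @Override public int hashCode() { return Objects.hash(key()); }
        };
      }

      // Like the old TypedKeyValue/UnprefixedKeyValue: inherits Object.equals.
      static Pair withoutEquals(String k) {
        return () -> k;
      }

      public static void main(String[] args) {
        Pair a = withEquals("x");
        Pair b = withoutEquals("x");
        System.out.println(a.equals(b)); // true
        System.out.println(b.equals(a)); // false: asymmetric, so List.contains
                                         // and assertions behave inconsistently
      }
    }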
---
 .../metadata/SchemaOneDeletedBlocksTable.java      |  34 +-----
 .../hdds/utils/db/RDBStoreByteArrayIterator.java   |   2 +-
 .../apache/hadoop/hdds/utils/db/RawKeyValue.java   |  85 ---------------
 .../org/apache/hadoop/hdds/utils/db/Table.java     | 116 +++++++--------------
 .../apache/hadoop/hdds/utils/db/TypedTable.java    |  57 +++-------
 .../utils/db/TestRDBStoreByteArrayIterator.java    |   3 +-
 .../utils/db/TestRDBStoreCodecBufferIterator.java  |  11 +-
 .../scm/block/DeletedBlockLogStateManagerImpl.java |   9 +-
 .../hadoop/hdds/scm/server/SCMCertStore.java       |  16 +--
 .../ozone/om/service/KeyDeletingService.java       |  10 +-
 .../hadoop/ozone/om/service/QuotaRepairTask.java   |  26 ++---
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java |  14 +--
 .../TestOMSnapshotMoveTableKeysResponse.java       |  56 ++++------
 .../ozone/om/service/TestKeyDeletingService.java   |   8 +-
 .../om/snapshot/TestFSODirectoryPathResolver.java  |  15 +--
 .../hadoop/ozone/recon/api/TestEndpoints.java      |   7 +-
 .../ozone/recon/tasks/TestFileSizeCountTask.java   |   9 +-
 .../ozone/recon/tasks/TestOmTableInsightTask.java  |   3 +-
 18 files changed, 121 insertions(+), 360 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java
index 880a79938c..c4f8b66570 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ozone.container.metadata;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.List;
+import java.util.stream.Collectors;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -145,39 +145,13 @@ private static String unprefix(String key) {
   private static List<KeyValue<String, ChunkInfoList>> unprefix(
       List<? extends KeyValue<String, ChunkInfoList>> kvs) {
 
-    List<KeyValue<String, ChunkInfoList>> processedKVs = new ArrayList<>();
-    kvs.forEach(kv -> processedKVs.add(new UnprefixedKeyValue(kv)));
-
-    return processedKVs;
+    return kvs.stream()
+        .map(kv -> Table.newKeyValue(unprefix(kv.getKey()), kv.getValue()))
+        .collect(Collectors.toList());
   }
 
   private static MetadataKeyFilters.KeyPrefixFilter getDeletedFilter() {
     return (new MetadataKeyFilters.KeyPrefixFilter())
             .addFilter(DELETED_KEY_PREFIX);
   }
-
-  /**
-   * {@link KeyValue} implementation that removes the deleted key prefix from
-   * an existing {@link KeyValue}.
-   */
-  private static class UnprefixedKeyValue implements KeyValue<String,
-        ChunkInfoList> {
-
-    private final KeyValue<String, ChunkInfoList> prefixedKeyValue;
-
-    UnprefixedKeyValue(
-            KeyValue<String, ChunkInfoList> prefixedKeyValue) {
-      this.prefixedKeyValue = prefixedKeyValue;
-    }
-
-    @Override
-    public String getKey() throws IOException {
-      return unprefix(prefixedKeyValue.getKey());
-    }
-
-    @Override
-    public ChunkInfoList getValue() throws IOException {
-      return prefixedKeyValue.getValue();
-    }
-  }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java
index 62303c8a3b..b821fd932a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreByteArrayIterator.java
@@ -40,7 +40,7 @@ byte[] key() {
   @Override
   Table.KeyValue<byte[], byte[]> getKeyValue() {
     final ManagedRocksIterator i = getRocksDBIterator();
-    return RawKeyValue.create(i.get().key(), i.get().value());
+    return Table.newKeyValue(i.get().key(), i.get().value());
   }
 
   @Override
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RawKeyValue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RawKeyValue.java
deleted file mode 100644
index c7250617b6..0000000000
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RawKeyValue.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.utils.db;
-
-import java.util.Arrays;
-import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
-
-/**
- * {@link KeyValue} for a raw type.
- *
- * @param <RAW> The raw type.
- */
-public abstract class RawKeyValue<RAW> implements KeyValue<RAW, RAW> {
-
-  private final RAW key;
-  private final RAW value;
-
-  /**
-   * Create a KeyValue pair.
-   *
-   * @param key   - Key Bytes
-   * @param value - Value bytes
-   * @return KeyValue object.
-   */
-  public static ByteArray create(byte[] key, byte[] value) {
-    return new ByteArray(key, value);
-  }
-
-  /** Implement {@link RawKeyValue} with byte[]. */
-  public static final class ByteArray extends RawKeyValue<byte[]> {
-    static byte[] copy(byte[] bytes) {
-      return Arrays.copyOf(bytes, bytes.length);
-    }
-
-    private ByteArray(byte[] key, byte[] value) {
-      super(key, value);
-    }
-
-    @Override
-    public byte[] getKey() {
-      return copy(super.getKey());
-    }
-
-    @Override
-    public byte[] getValue() {
-      return copy(super.getValue());
-    }
-  }
-
-  private RawKeyValue(RAW key, RAW value) {
-    this.key = key;
-    this.value = value;
-  }
-
-  /**
-   * Return key.
-   */
-  @Override
-  public RAW getKey() {
-    return key;
-  }
-
-  /**
-   * Return value.
-   */
-  @Override
-  public RAW getValue() {
-    return value;
-  }
-}
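
A note on the deletion above: RawKeyValue.ByteArray defensively copied its
byte[] key and value on each access, and byte[] pairs now come from the shared
Table.newKeyValue factory instead. The unified equals (see the Table.java diff
below) delegates to the key's and value's own equals, which for byte[] is
reference equality, so content-equal arrays still do not make equal KeyValues.
A sketch, assuming the hdds-framework classes from this commit are on the
classpath:

    import org.apache.hadoop.hdds.utils.db.Table;

    public class ByteArrayEqualityNote {
      public static void main(String[] args) {
        byte[] k1 = {0x00};
        byte[] k2 = {0x00};
        // Same contents, different instances: byte[] equals is identity-based.
        System.out.println(Table.newKeyValue(k1, k1).equals(Table.newKeyValue(k2, k2))); // false
        // Same instances: equal.
        System.out.println(Table.newKeyValue(k1, k1).equals(Table.newKeyValue(k1, k1))); // true
      }
    }

This is presumably why the iterator tests further down keep asserting with
assertArrayEquals on the extracted arrays rather than comparing KeyValues.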
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index 9552f327e2..5097cb0efc 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -325,94 +325,58 @@ void deleteBatchWithPrefix(BatchOperation batch, KEY prefix)
   /**
    * Class used to represent the key and value pair of a db entry.
    */
-  interface KeyValue<KEY, VALUE> {
-
-    KEY getKey() throws IOException;
+  final class KeyValue<K, V> {
+    private final K key;
+    private final V value;
+    private final int rawSize;
+
+    private KeyValue(K key, V value, int rawSize) {
+      this.key = key;
+      this.value = value;
+      this.rawSize = rawSize;
+    }
 
-    VALUE getValue() throws IOException;
+    public K getKey() {
+      return key;
+    }
 
-    default int getRawSize()  throws IOException {
-      return 0;
+    public V getValue() {
+      return value;
     }
-  }
 
-  static <K, V> KeyValue<K, V> newKeyValue(K key, V value) {
-    return new KeyValue<K, V>() {
-      @Override
-      public K getKey() {
-        return key;
-      }
+    public int getRawSize() {
+      return rawSize;
+    }
 
-      @Override
-      public V getValue() {
-        return value;
-      }
+    @Override
+    public String toString() {
+      return "(key=" + key + ", value=" + value + ")";
+    }
 
-      @Override
-      public String toString() {
-        return "(key=" + key + ", value=" + value + ")";
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj) {
+        return true;
+      } else if (!(obj instanceof KeyValue)) {
+        return false;
       }
+      final KeyValue<?, ?> that = (KeyValue<?, ?>) obj;
+      return this.getKey().equals(that.getKey())
+          && this.getValue().equals(that.getValue());
+    }
 
-      @Override
-      public boolean equals(Object obj) {
-        if (!(obj instanceof KeyValue)) {
-          return false;
-        }
-        KeyValue<?, ?> kv = (KeyValue<?, ?>) obj;
-        try {
-          return getKey().equals(kv.getKey()) && getValue().equals(kv.getValue());
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      }
+    @Override
+    public int hashCode() {
+      return Objects.hash(getKey(), getValue());
+    }
+  }
 
-      @Override
-      public int hashCode() {
-        return Objects.hash(getKey(), getValue());
-      }
-    };
+  static <K, V> KeyValue<K, V> newKeyValue(K key, V value) {
+    return newKeyValue(key, value, 0);
   }
 
   static <K, V> KeyValue<K, V> newKeyValue(K key, V value, int rawSize) {
-    return new KeyValue<K, V>() {
-      @Override
-      public K getKey() {
-        return key;
-      }
-
-      @Override
-      public V getValue() {
-        return value;
-      }
-
-      @Override
-      public int getRawSize() throws IOException {
-        return rawSize;
-      }
-
-      @Override
-      public String toString() {
-        return "(key=" + key + ", value=" + value + ")";
-      }
-
-      @Override
-      public boolean equals(Object obj) {
-        if (!(obj instanceof KeyValue)) {
-          return false;
-        }
-        KeyValue<?, ?> kv = (KeyValue<?, ?>) obj;
-        try {
-          return getKey().equals(kv.getKey()) && getValue().equals(kv.getValue());
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(getKey(), getValue());
-      }
-    };
+    return new KeyValue<>(key, value, rawSize);
   }
 
   /** A {@link TableIterator} to iterate {@link KeyValue}s. */
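
With the anonymous classes gone, every KeyValue in the codebase shares this one
final implementation, and the diff deliberately keeps rawSize out of both
equals and hashCode, making it metadata rather than part of the pair's
identity. A small sketch of the resulting value semantics, assuming this Table
API:

    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.hdds.utils.db.Table.KeyValue;

    public class KeyValueSemantics {
      public static void main(String[] args) {
        KeyValue<String, String> a = Table.newKeyValue("k", "v");     // rawSize defaults to 0
        KeyValue<String, String> b = Table.newKeyValue("k", "v", 42); // explicit rawSize
        System.out.println(a.equals(b));                  // true: rawSize is ignored
        System.out.println(a.hashCode() == b.hashCode()); // true: consistent with equals
        System.out.println(a);                            // (key=k, value=v)
      }
    }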
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index dcf482aad2..c3c3935211 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -123,11 +123,11 @@ private byte[] encodeValue(VALUE value) throws IOException {
     return value == null ? null : valueCodec.toPersistedFormat(value);
   }
 
-  private KEY decodeKey(byte[] key) throws IOException {
+  private KEY decodeKey(byte[] key) throws CodecException {
     return key == null ? null : keyCodec.fromPersistedFormat(key);
   }
 
-  private VALUE decodeValue(byte[] value) throws IOException {
+  private VALUE decodeValue(byte[] value) throws CodecException {
     return value == null ? null : valueCodec.fromPersistedFormat(value);
   }
 
@@ -465,7 +465,7 @@ public TableCacheMetrics createCacheMetrics() {
   }
 
   @Override
-  public List<TypedKeyValue> getRangeKVs(
+  public List<KeyValue<KEY, VALUE>> getRangeKVs(
           KEY startKey, int count, KEY prefix,
           MetadataKeyFilters.MetadataKeyFilter... filters)
           throws IOException, IllegalArgumentException {
@@ -477,15 +477,11 @@ public List<TypedKeyValue> getRangeKVs(
 
     List<? extends KeyValue<byte[], byte[]>> rangeKVBytes =
         rawTable.getRangeKVs(startKeyBytes, count, prefixBytes, filters);
-
-    List<TypedKeyValue> rangeKVs = new ArrayList<>();
-    rangeKVBytes.forEach(byteKV -> rangeKVs.add(new TypedKeyValue(byteKV)));
-
-    return rangeKVs;
+    return convert(rangeKVBytes);
   }
 
   @Override
-  public List<TypedKeyValue> getSequentialRangeKVs(
+  public List<KeyValue<KEY, VALUE>> getSequentialRangeKVs(
           KEY startKey, int count, KEY prefix,
           MetadataKeyFilters.MetadataKeyFilter... filters)
           throws IOException, IllegalArgumentException {
@@ -498,10 +494,15 @@ public List<TypedKeyValue> getSequentialRangeKVs(
     List<? extends KeyValue<byte[], byte[]>> rangeKVBytes =
         rawTable.getSequentialRangeKVs(startKeyBytes, count,
             prefixBytes, filters);
+    return convert(rangeKVBytes);
+  }
 
-    List<TypedKeyValue> rangeKVs = new ArrayList<>();
-    rangeKVBytes.forEach(byteKV -> rangeKVs.add(new TypedKeyValue(byteKV)));
-
+  private List<KeyValue<KEY, VALUE>> convert(List<? extends KeyValue<byte[], byte[]>> rangeKVBytes)
+      throws CodecException {
+    final List<KeyValue<KEY, VALUE>> rangeKVs = new ArrayList<>();
+    for (KeyValue<byte[], byte[]> kv : rangeKVBytes) {
+      rangeKVs.add(Table.newKeyValue(decodeKey(kv.getKey()), decodeValue(kv.getValue())));
+    }
     return rangeKVs;
   }
 
@@ -532,28 +533,6 @@ TableCache<KEY, VALUE> getCache() {
     return cache;
   }
 
-  /**
-   * Key value implementation for strongly typed tables.
-   */
-  public final class TypedKeyValue implements KeyValue<KEY, VALUE> {
-
-    private final KeyValue<byte[], byte[]> rawKeyValue;
-
-    private TypedKeyValue(KeyValue<byte[], byte[]> rawKeyValue) {
-      this.rawKeyValue = rawKeyValue;
-    }
-
-    @Override
-    public KEY getKey() throws IOException {
-      return decodeKey(rawKeyValue.getKey());
-    }
-
-    @Override
-    public VALUE getValue() throws IOException {
-      return decodeValue(rawKeyValue.getValue());
-    }
-  }
-
   RawIterator<CodecBuffer> newCodecBufferTableIterator(
       TableIterator<CodecBuffer, KeyValue<CodecBuffer, CodecBuffer>> i) {
     return new RawIterator<CodecBuffer>(i) {
@@ -574,8 +553,7 @@ public CodecBuffer get() {
       }
 
       @Override
-      KeyValue<KEY, VALUE> convert(KeyValue<CodecBuffer, CodecBuffer> raw)
-          throws IOException {
+      KeyValue<KEY, VALUE> convert(KeyValue<CodecBuffer, CodecBuffer> raw) throws CodecException {
         final int rawSize = raw.getValue().readableBytes();
         final KEY key = keyCodec.fromCodecBuffer(raw.getKey());
         final VALUE value = valueCodec.fromCodecBuffer(raw.getValue());
@@ -600,8 +578,8 @@ AutoCloseSupplier<byte[]> convert(KEY key) throws IOException {
     }
 
     @Override
-    KeyValue<KEY, VALUE> convert(KeyValue<byte[], byte[]> raw) {
-      return new TypedKeyValue(raw);
+    KeyValue<KEY, VALUE> convert(KeyValue<byte[], byte[]> raw) throws CodecException {
+      return Table.newKeyValue(decodeKey(raw.getKey()), decodeValue(raw.getValue()));
     }
   }
 
@@ -625,8 +603,7 @@ abstract class RawIterator<RAW>
      * Covert the given {@link Table.KeyValue}
      * from ({@link RAW}, {@link RAW}) to ({@link KEY}, {@link VALUE}).
      */
-    abstract KeyValue<KEY, VALUE> convert(KeyValue<RAW, RAW> raw)
-        throws IOException;
+    abstract KeyValue<KEY, VALUE> convert(KeyValue<RAW, RAW> raw) throws CodecException;
 
     @Override
     public void seekToFirst() {
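
A design note on this file: the removed TypedKeyValue wrapper decoded lazily,
re-running the codec on every getKey()/getValue() call (and could throw from
those getters), whereas convert() now decodes each raw pair exactly once and
hands back a plain value object. A minimal standalone analog of that eager
pattern, using hypothetical names rather than Ozone's codec interfaces:

    import java.util.AbstractMap.SimpleImmutableEntry;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map.Entry;
    import java.util.function.Function;

    public class EagerDecode {
      // Decode every raw pair up front instead of wrapping it lazily,
      // so decoding errors surface here rather than at each later access.
      static <K, V> List<Entry<K, V>> convert(List<Entry<byte[], byte[]>> raw,
          Function<byte[], K> keyCodec, Function<byte[], V> valueCodec) {
        List<Entry<K, V>> out = new ArrayList<>(raw.size());
        for (Entry<byte[], byte[]> e : raw) {
          out.add(new SimpleImmutableEntry<>(
              keyCodec.apply(e.getKey()), valueCodec.apply(e.getValue())));
        }
        return out;
      }
    }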
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java
index 7a11f55720..0054dde422 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreByteArrayIterator.java
@@ -95,8 +95,7 @@ public void testForeachRemainingCallsConsumerWithAllElements() {
     RDBStoreByteArrayIterator iter = newIterator();
     iter.forEachRemaining(consumerStub);
 
-    ArgumentCaptor<RawKeyValue.ByteArray> capture =
-        forClass(RawKeyValue.ByteArray.class);
+    final ArgumentCaptor<Table.KeyValue<byte[], byte[]>> capture = forClass(Table.KeyValue.class);
     verify(consumerStub, times(3)).accept(capture.capture());
     assertArrayEquals(
         new byte[]{0x00}, capture.getAllValues().get(0).getKey());
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java
index 05a4c94639..500c465a4d 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreCodecBufferIterator.java
@@ -31,7 +31,6 @@
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
-import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
@@ -116,14 +115,8 @@ public void testForEachRemaining() throws Exception {
 
     List<Table.KeyValue<byte[], byte[]>> remaining = new ArrayList<>();
     try (RDBStoreCodecBufferIterator i = newIterator()) {
-      i.forEachRemaining(kv -> {
-        try {
-          remaining.add(RawKeyValue.create(
-              kv.getKey().getArray(), kv.getValue().getArray()));
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      });
+      i.forEachRemaining(kv ->
+          remaining.add(Table.newKeyValue(kv.getKey().getArray(), kv.getValue().getArray())));
 
       System.out.println("remaining: " + remaining);
       assertArrayEquals(new byte[]{0x00}, remaining.get(0).getKey());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
index 50ce312933..f198a425e7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogStateManagerImpl.java
@@ -84,12 +84,7 @@ private void findNext() {
         while (iter.hasNext()) {
           TypedTable.KeyValue<Long, DeletedBlocksTransaction> next = iter
               .next();
-          long txID;
-          try {
-            txID = next.getKey();
-          } catch (IOException e) {
-            throw new IllegalStateException("");
-          }
+          final long txID = next.getKey();
 
           if ((deletingTxIDs == null || !deletingTxIDs.contains(txID)) && (
               skippingRetryTxIDs == null || !skippingRetryTxIDs
@@ -146,7 +141,7 @@ public TypedTable.KeyValue<Long, DeletedBlocksTransaction> seek(
       }
 
       @Override
-      public void removeFromDB() throws IOException {
+      public void removeFromDB() {
         throw new UnsupportedOperationException("read-only");
       }
     };
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
index da5597bf41..b3751ab355 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
@@ -28,6 +28,7 @@
 import java.util.List;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
 import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol;
 import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
@@ -160,24 +161,15 @@ public X509Certificate getCertificateByID(BigInteger serialID)
   public List<X509Certificate> listCertificate(NodeType role,
       BigInteger startSerialID, int count)
       throws IOException {
-    List<X509Certificate> results = new ArrayList<>();
-    String errorMessage = "Fail to list certificate from SCM metadata store";
     Preconditions.checkNotNull(startSerialID);
 
     if (startSerialID.longValue() == 0) {
       startSerialID = null;
     }
 
-    for (Table.KeyValue<BigInteger, X509Certificate> kv : getValidCertTableList(role, startSerialID, count)) {
-      try {
-        X509Certificate cert = kv.getValue();
-        results.add(cert);
-      } catch (IOException e) {
-        LOG.error(errorMessage, e);
-        throw new SCMSecurityException(errorMessage);
-      }
-    }
-    return results;
+    return getValidCertTableList(role, startSerialID, count).stream()
+        .map(Table.KeyValue::getValue)
+        .collect(Collectors.toList());
   }
 
   private List<? extends Table.KeyValue<BigInteger, X509Certificate>>
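
This cleanup falls directly out of the API change: getKey()/getValue() no
longer declare IOException, so they can be used as method references in streams
instead of try/catch lambdas. The same shape, sketched standalone against the
new Table API (the list here is hypothetical):

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;
    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.hdds.utils.db.Table.KeyValue;

    public class MethodRefSketch {
      public static void main(String[] args) {
        List<KeyValue<String, Integer>> kvs = Arrays.asList(
            Table.newKeyValue("a", 1), Table.newKeyValue("b", 2));
        // Compiles only because getValue() is no longer a throwing method.
        List<Integer> values = kvs.stream()
            .map(KeyValue::getValue)
            .collect(Collectors.toList());
        System.out.println(values); // [1, 2]
      }
    }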
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
index c707875814..d875e95e85 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
 import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
 import org.apache.hadoop.hdds.utils.BackgroundTaskResult.EmptyTaskResult;
+import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
@@ -358,13 +359,8 @@ private void processDeletedKeysForStore(SnapshotInfo currentSnapshotInfo, KeyMan
                  keyManager, lock)) {
           List<String> renamedTableEntries =
              keyManager.getRenamesKeyEntries(volume, bucket, null, renameEntryFilter, remainNum).stream()
-                  .map(entry -> {
-                    try {
-                      return entry.getKey();
-                    } catch (IOException e) {
-                      throw new UncheckedIOException(e);
-                    }
-                  }).collect(Collectors.toList());
+                  .map(Table.KeyValue::getKey)
+                  .collect(Collectors.toList());
           remainNum -= renamedTableEntries.size();
 
           // Get pending keys that can be deleted
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
index 99cebe9a63..1204ce2ecc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/QuotaRepairTask.java
@@ -399,22 +399,18 @@ private static <VALUE> void extractCount(
       Table.KeyValue<String, VALUE> kv,
       Map<String, CountPair> prefixUsageMap,
       boolean haveValue) {
-    try {
-      String prefix = getVolumeBucketPrefix(kv.getKey());
-      CountPair usage = prefixUsageMap.get(prefix);
-      if (null == usage) {
-        return;
-      }
-      usage.incrNamespace(1L);
-      // avoid decode of value
-      if (haveValue) {
-        VALUE value = kv.getValue();
-        if (value instanceof OmKeyInfo) {
-          usage.incrSpace(((OmKeyInfo) value).getReplicatedSize());
-        }
+    String prefix = getVolumeBucketPrefix(kv.getKey());
+    CountPair usage = prefixUsageMap.get(prefix);
+    if (null == usage) {
+      return;
+    }
+    usage.incrNamespace(1L);
+    // avoid decode of value
+    if (haveValue) {
+      VALUE value = kv.getValue();
+      if (value instanceof OmKeyInfo) {
+        usage.incrSpace(((OmKeyInfo) value).getReplicatedSize());
       }
-    } catch (IOException ex) {
-      throw new UncheckedIOException(ex);
     }
   }
   
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index 645e561a20..d021cc7525 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -134,15 +134,11 @@ public void testGetDeletedKeyEntries(int numberOfVolumes, int numberOfBucketsPer
        volumeNamePrefix, bucketNamePrefix, keyPrefix, volumeNumber, bucketNumber, startVolumeNumber, startBucketNumber,
         startKeyNumber, filter, numberOfEntries).stream()
         .map(kv -> {
-          try {
-            String key = kv.getKey();
-            RepeatedOmKeyInfo value = kv.getValue();
-            List<OmKeyInfo> omKeyInfos = Collections.singletonList(Mockito.mock(OmKeyInfo.class));
-            when(value.cloneOmKeyInfoList()).thenReturn(omKeyInfos);
-            return Table.newKeyValue(key, omKeyInfos);
-          } catch (IOException e) {
-            throw new RuntimeException(e);
-          }
+          String key = kv.getKey();
+          RepeatedOmKeyInfo value = kv.getValue();
+          List<OmKeyInfo> omKeyInfos = Collections.singletonList(Mockito.mock(OmKeyInfo.class));
+          when(value.cloneOmKeyInfoList()).thenReturn(omKeyInfos);
+          return Table.newKeyValue(key, omKeyInfos);
         }).collect(Collectors.toList());
     String volumeName = volumeNumber == null ? null : (String.format("%s%010d", volumeNamePrefix, volumeNumber));
     String bucketName = bucketNumber == null ? null : (String.format("%s%010d", bucketNamePrefix, bucketNumber));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java
index aabacd8c50..3130c42df3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotMoveTableKeysResponse.java
@@ -118,31 +118,19 @@ public void testMoveTableKeysToNextSnapshot(boolean nextSnapshotExists) throws E
       Map<String, String> renameEntries = new HashMap<>();
       snapshot.getMetadataManager().getDeletedTable().iterator()
           .forEachRemaining(entry -> {
-            try {
-              deletedTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey())
-                  .addAllKeyInfos(entry.getValue().getOmKeyInfoList().stream().map(omKeyInfo -> omKeyInfo.getProtobuf(
-                      ClientVersion.CURRENT_VERSION)).collect(Collectors.toList())).build());
-            } catch (IOException e) {
-              throw new RuntimeException(e);
-            }
+            deletedTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey())
+                .addAllKeyInfos(entry.getValue().getOmKeyInfoList().stream().map(omKeyInfo -> omKeyInfo.getProtobuf(
+                    ClientVersion.CURRENT_VERSION)).collect(Collectors.toList())).build());
           });
 
       snapshot.getMetadataManager().getDeletedDirTable().iterator()
           .forEachRemaining(entry -> {
-            try {
-              deletedDirTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey())
-                  .addKeyInfos(entry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build());
-            } catch (IOException e) {
-              throw new RuntimeException(e);
-            }
+            deletedDirTable.add(OzoneManagerProtocolProtos.SnapshotMoveKeyInfos.newBuilder().setKey(entry.getKey())
+                .addKeyInfos(entry.getValue().getProtobuf(ClientVersion.CURRENT_VERSION)).build());
          });
      snapshot.getMetadataManager().getSnapshotRenamedTable().iterator().forEachRemaining(entry -> {
-        try {
-          renamedTable.add(HddsProtos.KeyValue.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build());
-          renameEntries.put(entry.getKey(), entry.getValue());
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
+        renamedTable.add(HddsProtos.KeyValue.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build());
+        renameEntries.put(entry.getKey(), entry.getValue());
       });
      OMSnapshotMoveTableKeysResponse response = new OMSnapshotMoveTableKeysResponse(
          OzoneManagerProtocolProtos.OMResponse.newBuilder().setStatus(OzoneManagerProtocolProtos.Status.OK)
@@ -160,20 +148,16 @@ public void testMoveTableKeysToNextSnapshot(boolean nextSnapshotExists) throws E
       AtomicInteger count = new AtomicInteger();
      nextMetadataManager.getDeletedTable().iterator().forEachRemaining(entry -> {
         count.getAndIncrement();
-        try {
-          int maxCount = count.get() >= 6 && count.get() <= 8 ? 20 : 10;
-          Assertions.assertEquals(maxCount, entry.getValue().getOmKeyInfoList().size());
-          List<Long> versions = entry.getValue().getOmKeyInfoList().stream().map(OmKeyInfo::getKeyLocationVersions)
-              .map(omKeyInfo -> omKeyInfo.get(0).getVersion()).collect(Collectors.toList());
-          List<Long> expectedVersions = new ArrayList<>();
-          if (maxCount == 20) {
-            expectedVersions.addAll(LongStream.range(10, 20).boxed().collect(Collectors.toList()));
-          }
-          expectedVersions.addAll(LongStream.range(0, 10).boxed().collect(Collectors.toList()));
-          Assertions.assertEquals(expectedVersions, versions);
-        } catch (IOException e) {
-          throw new RuntimeException(e);
+        int maxCount = count.get() >= 6 && count.get() <= 8 ? 20 : 10;
+        Assertions.assertEquals(maxCount, entry.getValue().getOmKeyInfoList().size());
+        List<Long> versions = entry.getValue().getOmKeyInfoList().stream().map(OmKeyInfo::getKeyLocationVersions)
+            .map(omKeyInfo -> omKeyInfo.get(0).getVersion()).collect(Collectors.toList());
+        List<Long> expectedVersions = new ArrayList<>();
+        if (maxCount == 20) {
+          expectedVersions.addAll(LongStream.range(10, 20).boxed().collect(Collectors.toList()));
         }
+        expectedVersions.addAll(LongStream.range(0, 10).boxed().collect(Collectors.toList()));
+        Assertions.assertEquals(expectedVersions, versions);
       });
       Assertions.assertEquals(15, count.get());
       count.set(0);
@@ -182,12 +166,8 @@ public void testMoveTableKeysToNextSnapshot(boolean nextSnapshotExists) throws E
       Assertions.assertEquals(15, count.get());
      count.set(0);
      nextMetadataManager.getSnapshotRenamedTable().iterator().forEachRemaining(entry -> {
-        try {
-          String expectedValue = renameEntries.getOrDefault(entry.getKey(), entry.getValue());
-          Assertions.assertEquals(expectedValue, entry.getValue());
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
+        String expectedValue = renameEntries.getOrDefault(entry.getKey(), entry.getValue());
+        Assertions.assertEquals(expectedValue, entry.getValue());
         count.getAndIncrement();
       });
       Assertions.assertEquals(15, count.get());
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
index 3c32a70e04..b3178580b5 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
@@ -486,12 +486,8 @@ public void testRenamedKeyReclaimation(boolean testForSnapshot)
        assertTableRowCount(snapshotRenamedTable, initialRenamedCount + 1, metadataManager);
        try (TableIterator<String, ? extends Table.KeyValue<String, String>> itr = snapshotRenamedTable.iterator()) {
           itr.forEachRemaining(entry -> {
-            try {
-              String[] val = metadataManager.splitRenameKey(entry.getKey());
-              Assertions.assertEquals(Long.valueOf(val[2]), keyInfo.getObjectID());
-            } catch (IOException e) {
-              throw new UncheckedIOException(e);
-            }
+            String[] val = metadataManager.splitRenameKey(entry.getKey());
+            Assertions.assertEquals(Long.valueOf(val[2]), keyInfo.getObjectID());
           });
         }
       } finally {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java
index 6c3e5fef15..4fbee30ab0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestFSODirectoryPathResolver.java
@@ -57,19 +57,8 @@ private Table<String, OmDirectoryInfo> getMockedDirectoryInfoTable(
          Iterator<? extends Table.KeyValue<String, OmDirectoryInfo>> iterator =
               dirMap
               .getOrDefault(dirId, Collections.emptyList()).stream()
-              .map(children -> new Table.KeyValue<String, OmDirectoryInfo>() {
-                  @Override
-                  public String getKey() {
-                    return prefix + children + OM_KEY_PREFIX + "dir" + children;
-                  }
-
-                  @Override
-                  public OmDirectoryInfo getValue() {
-                    return OmDirectoryInfo.newBuilder()
-                        .setName("dir" + children).setObjectID(children)
-                        .build();
-                  }
-              })
+              .map(children -> Table.newKeyValue(prefix + children + OM_KEY_PREFIX + "dir" + children,
+                  OmDirectoryInfo.newBuilder().setName("dir" + children).setObjectID(children).build()))
               .iterator();
           return new TableIterator<String,
               Table.KeyValue<String, OmDirectoryInfo>>() {
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index e2d3775f36..d5adaa0aa2 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -97,6 +97,7 @@
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
+import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TypedTable;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -833,10 +834,8 @@ public void testGetFileCounts() throws Exception {
         .TypedTableIterator.class);
     TypedTable.TypedTableIterator mockKeyIterFso = mock(TypedTable
         .TypedTableIterator.class);
-    TypedTable.TypedKeyValue mockKeyValueLegacy = mock(
-        TypedTable.TypedKeyValue.class);
-    TypedTable.TypedKeyValue mockKeyValueFso = mock(
-        TypedTable.TypedKeyValue.class);
+    final Table.KeyValue mockKeyValueLegacy = mock(Table.KeyValue.class);
+    final Table.KeyValue mockKeyValueFso = mock(Table.KeyValue.class);
 
     when(keyTableLegacy.iterator()).thenReturn(mockKeyIterLegacy);
     when(keyTableFso.iterator()).thenReturn(mockKeyIterFso);
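
One subtlety in these test updates: Table.KeyValue is now final, and Mockito
can only mock final classes with its inline mock maker (the default since
Mockito 5, or via the mockito-inline artifact / mock-maker-inline extension
before that). The diff keeps mocking KeyValue directly, so the build presumably
already uses it. A hedged sketch of the pattern:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.apache.hadoop.hdds.utils.db.Table;

    class FinalClassMockSketch {
      @SuppressWarnings("unchecked")
      static Table.KeyValue<String, Long> stubKeyValue() {
        // Requires the inline mock maker because Table.KeyValue is final.
        Table.KeyValue<String, Long> kv = mock(Table.KeyValue.class);
        when(kv.getKey()).thenReturn("key");
        when(kv.getValue()).thenReturn(1L);
        return kv;
      }
    }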
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java
index 1eaf85499e..5e7af18358 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java
@@ -37,6 +37,7 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TypedTable;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
@@ -112,7 +113,7 @@ public void testReprocess() throws IOException {
     when(keyTableOBS.iterator()).thenReturn(mockIterOBS);
     // Simulate three keys then end.
     when(mockIterOBS.hasNext()).thenReturn(true, true, true, false);
-    TypedTable.TypedKeyValue mockKeyValueOBS = mock(TypedTable.TypedKeyValue.class);
+    final Table.KeyValue mockKeyValueOBS = mock(Table.KeyValue.class);
     when(mockIterOBS.next()).thenReturn(mockKeyValueOBS);
     when(mockKeyValueOBS.getValue()).thenReturn(omKeyInfos[0], omKeyInfos[1], omKeyInfos[2]);
 
@@ -123,7 +124,7 @@ public void testReprocess() throws IOException {
     TypedTable.TypedTableIterator mockIterFSO = mock(TypedTable.TypedTableIterator.class);
     when(keyTableFSO.iterator()).thenReturn(mockIterFSO);
     when(mockIterFSO.hasNext()).thenReturn(true, true, true, false);
-    TypedTable.TypedKeyValue mockKeyValueFSO = mock(TypedTable.TypedKeyValue.class);
+    final Table.KeyValue mockKeyValueFSO = mock(Table.KeyValue.class);
     when(mockIterFSO.next()).thenReturn(mockKeyValueFSO);
     when(mockKeyValueFSO.getValue()).thenReturn(omKeyInfos[0], omKeyInfos[1], omKeyInfos[2]);
 
@@ -305,8 +306,8 @@ public void testReprocessAtScale() throws IOException {
 
     TypedTable.TypedTableIterator mockKeyIterLegacy = mock(TypedTable.TypedTableIterator.class);
     TypedTable.TypedTableIterator mockKeyIterFso = mock(TypedTable.TypedTableIterator.class);
-    TypedTable.TypedKeyValue mockKeyValueLegacy = mock(TypedTable.TypedKeyValue.class);
-    TypedTable.TypedKeyValue mockKeyValueFso = mock(TypedTable.TypedKeyValue.class);
+    final Table.KeyValue mockKeyValueLegacy = mock(Table.KeyValue.class);
+    final Table.KeyValue mockKeyValueFso = mock(Table.KeyValue.class);
 
     when(keyTableLegacy.iterator()).thenReturn(mockKeyIterLegacy);
     when(keyTableFso.iterator()).thenReturn(mockKeyIterFso);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java
index ae83bf4f6e..40dd0c972e 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java
@@ -350,8 +350,7 @@ public void testReprocessForCount() throws Exception {
       when(omMetadataManager.getTable(tableName)).thenReturn(table);
       when(mockIter.hasNext()).thenReturn(true, true, true, true, true, false);
 
-      TypedTable.TypedKeyValue mockKeyValue =
-          mock(TypedTable.TypedKeyValue.class);
+      final Table.KeyValue mockKeyValue = mock(Table.KeyValue.class);
 
       if (tableName.equals(DELETED_TABLE)) {
         RepeatedOmKeyInfo keyInfo = mock(RepeatedOmKeyInfo.class);

