This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new ba7b3cfcd9b HDDS-13330. DBStore and related classes should not throw IOException (#8691)
ba7b3cfcd9b is described below

commit ba7b3cfcd9b42e5fd64b710e545a0bd213f843e1
Author: Tsz-Wo Nicholas Sze <szets...@apache.org>
AuthorDate: Thu Jul 3 16:56:42 2025 +0800

    HDDS-13330. DBStore and related classes should not throw IOException (#8691)
---
 .../container/common/interfaces/DBHandle.java      |  4 +-
 .../container/common/utils/DatanodeStoreCache.java | 14 +---
 .../hadoop/ozone/container/common/utils/RawDB.java |  3 +-
 .../container/common/utils/ReferenceCountedDB.java | 12 +--
 .../keyvalue/helpers/KeyValueContainerUtil.java    |  9 +--
 .../container/metadata/AbstractDatanodeStore.java  | 20 +++--
 .../ozone/container/metadata/AbstractRDBStore.java | 26 +++---
 .../ozone/container/metadata/DBStoreManager.java   | 16 +---
 .../WitnessedContainerMetadataStoreImpl.java       |  7 +-
 .../hdds/utils/db/DBColumnFamilyDefinition.java    |  6 +-
 .../org/apache/hadoop/hdds/utils/db/DBStore.java   | 86 +++++---------------
 .../hadoop/hdds/utils/db/DBStoreBuilder.java       | 19 +++--
 .../org/apache/hadoop/hdds/utils/db/RDBStore.java  | 92 +++++++---------------
 .../apache/hadoop/hdds/utils/db/TestRDBStore.java  | 44 +----------
 .../hdds/utils/db/RocksDatabaseException.java      | 10 ++-
 .../ozone/om/snapshot/TestSnapshotCache.java       |  3 +-
 16 files changed, 105 insertions(+), 266 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java
index 063718e6a04..8a876dfacf3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/DBHandle.java
@@ -17,13 +17,13 @@
 
 package org.apache.hadoop.ozone.container.common.interfaces;
 
-import java.io.Closeable;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.ratis.util.UncheckedAutoCloseable;
 
 /**
  * DB handle abstract class.
  */
-public abstract class DBHandle implements Closeable {
+public abstract class DBHandle implements UncheckedAutoCloseable {
 
   private final DatanodeStore store;
   private final String containerDBPath;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DatanodeStoreCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DatanodeStoreCache.java
index 3b8d78a494c..bb91187ea84 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DatanodeStoreCache.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DatanodeStoreCache.java
@@ -100,11 +100,7 @@ public void removeDB(String containerDBPath) {
       return;
     }
 
-    try {
-      db.getStore().stop();
-    } catch (Exception e) {
-      LOG.error("Stop DatanodeStore: {} failed", containerDBPath, e);
-    }
+    db.getStore().stop();
     LOG.info("Removed db {} from cache", containerDBPath);
   }
 
@@ -117,12 +113,8 @@ public void shutdownCache() {
       return;
     }
 
-    for (Map.Entry<String, RawDB> entry : datanodeStoreMap.entrySet()) {
-      try {
-        entry.getValue().getStore().stop();
-      } catch (Exception e) {
-        LOG.warn("Stop DatanodeStore: {} failed", entry.getKey(), e);
-      }
+    for (RawDB db : datanodeStoreMap.values()) {
+      db.getStore().stop();
     }
     datanodeStoreMap.clear();
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java
index 34d9ce2f09a..9feae93b6c7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/RawDB.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.ozone.container.common.utils;
 
-import java.io.IOException;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 
@@ -32,7 +31,7 @@ public RawDB(DatanodeStore store, String containerDBPath) {
   }
 
   @Override
-  public void close() throws IOException {
+  public void close() {
     // NOTE: intend to do nothing on close
     // With schema v3, block operations on a single container should not
     // close the whole db handle.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
index 6866a5684d6..f32184421c7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.container.common.utils;
 
 import com.google.common.base.Preconditions;
-import java.io.IOException;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
@@ -72,20 +71,15 @@ public boolean cleanup() {
         LOG.debug("Close {} refCnt {}", getContainerDBPath(),
             referenceCount.get());
       }
-      try {
-        getStore().stop();
-        return true;
-      } catch (Exception e) {
-        LOG.error("Error closing DB. Container: " + getContainerDBPath(), e);
-        return false;
-      }
+      getStore().stop();
+      return true;
     } else {
       return false;
     }
   }
 
   @Override
-  public void close() throws IOException {
+  public void close() {
     decrementReference();
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 885003ac1b8..0550ecad4a3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -283,14 +283,7 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData,
       } else if (store != null) {
         // We only stop the store if cacheDB is null, as otherwise we would
         // close the rocksDB handle in the cache and the next reader would fail
-        try {
-          store.stop();
-        } catch (IOException e) {
-          throw e;
-        } catch (Exception e) {
-          throw new RuntimeException("Unexpected exception closing the " +
-              "RocksDB when loading containers", e);
-        }
+        store.stop();
       }
     }
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 34af94084f0..6c7a567f0ee 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -26,9 +26,11 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
+import org.apache.hadoop.hdds.utils.db.CodecException;
 import org.apache.hadoop.hdds.utils.db.DBProfile;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
@@ -63,17 +65,16 @@ public class AbstractDatanodeStore extends AbstractRDBStore<AbstractDatanodeDBDe
    * Constructs the metadata store and starts the DB services.
    *
    * @param config - Ozone Configuration.
-   * @throws IOException - on Failure.
    */
   protected AbstractDatanodeStore(ConfigurationSource config,
       AbstractDatanodeDBDefinition dbDef, boolean openReadOnly)
-      throws IOException {
+      throws RocksDatabaseException, CodecException {
     super(dbDef, config, openReadOnly);
   }
 
   @Override
   protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, ConfigurationSource config)
-      throws IOException {
+      throws RocksDatabaseException, CodecException {
     AbstractDatanodeDBDefinition dbDefinition = this.getDbDef();
     if (dbDefinition instanceof DatanodeSchemaOneDBDefinition ||
         dbDefinition instanceof DatanodeSchemaTwoDBDefinition) {
@@ -182,15 +183,12 @@ protected Table<String, Long> getFinalizeBlocksTableWithIterator() {
     return this.finalizeBlocksTableWithIterator;
   }
 
-  protected static void checkTableStatus(Table<?, ?> table, String name)
-          throws IOException {
-    String logMessage = "Unable to get a reference to %s table. Cannot " +
-            "continue.";
-    String errMsg = "Inconsistent DB state, Table - %s. Please check the" +
-            " logs for more info.";
+  static void checkTableStatus(Table<?, ?> table, String name) throws RocksDatabaseException {
     if (table == null) {
-      LOG.error(String.format(logMessage, name));
-      throw new IOException(String.format(errMsg, name));
+      final RocksDatabaseException e = new RocksDatabaseException(
+          "Failed to get table " + name + ": Please check the logs for more 
info.");
+      LOG.error("", e);
+      throw e;
     }
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java
index 0033b62a891..8e2f0349898 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java
@@ -21,12 +21,13 @@
 import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE;
 
 import com.google.common.annotations.VisibleForTesting;
-import java.io.IOException;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.utils.db.BatchOperationHandler;
+import org.apache.hadoop.hdds.utils.db.CodecException;
 import org.apache.hadoop.hdds.utils.db.DBDefinition;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
@@ -41,10 +42,10 @@ public abstract class AbstractRDBStore<DEF extends DBDefinition> implements DBSt
   private final DEF dbDef;
   private final ManagedColumnFamilyOptions cfOptions;
   private static DatanodeDBProfile dbProfile;
-  private final boolean openReadOnly;
   private volatile DBStore store;
 
-  protected AbstractRDBStore(DEF dbDef, ConfigurationSource config, boolean openReadOnly) throws IOException {
+  protected AbstractRDBStore(DEF dbDef, ConfigurationSource config, boolean openReadOnly)
+      throws RocksDatabaseException, CodecException {
     dbProfile = DatanodeDBProfile.getProfile(config.getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE));
 
     // The same config instance is used on each datanode, so we can share the
@@ -52,13 +53,7 @@ protected AbstractRDBStore(DEF dbDef, ConfigurationSource config, boolean openRe
     // for all containers on a datanode.
     cfOptions = dbProfile.getColumnFamilyOptions(config);
     this.dbDef = dbDef;
-    this.openReadOnly = openReadOnly;
-    start(config);
-  }
 
-  @Override
-  public void start(ConfigurationSource config)
-      throws IOException {
     if (this.store == null) {
       ManagedDBOptions options = dbProfile.getDBOptions();
       options.setCreateIfMissing(true);
@@ -80,10 +75,11 @@ public void start(ConfigurationSource config)
   }
 
   protected abstract DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options,
-                                         ConfigurationSource config) throws IOException;
+                                         ConfigurationSource config)
+      throws RocksDatabaseException, CodecException;
 
   @Override
-  public synchronized void stop() throws Exception {
+  public synchronized void stop() {
     if (store != null) {
       store.close();
       store = null;
@@ -109,23 +105,23 @@ public BatchOperationHandler getBatchHandler() {
   }
 
   @Override
-  public void close() throws IOException {
+  public void close() {
     this.store.close();
     this.cfOptions.close();
   }
 
   @Override
-  public void flushDB() throws IOException {
+  public void flushDB() throws RocksDatabaseException {
     store.flushDB();
   }
 
   @Override
-  public void flushLog(boolean sync) throws IOException {
+  public void flushLog(boolean sync) throws RocksDatabaseException {
     store.flushLog(sync);
   }
 
   @Override
-  public void compactDB() throws IOException {
+  public void compactDB() throws RocksDatabaseException {
     store.compactDB();
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java
index 2d7d7b86102..02138c23e26 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DBStoreManager.java
@@ -17,29 +17,19 @@
 
 package org.apache.hadoop.ozone.container.metadata;
 
-import java.io.Closeable;
 import java.io.IOException;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.utils.db.BatchOperationHandler;
 import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.ratis.util.UncheckedAutoCloseable;
 
 /**
  * Interface for interacting with datanode databases.
  */
-public interface DBStoreManager extends Closeable {
-
-  /**
-   * Start datanode manager.
-   *
-   * @param configuration - Configuration
-   * @throws IOException - Unable to start datanode store.
-   */
-  void start(ConfigurationSource configuration) throws IOException;
-
+public interface DBStoreManager extends UncheckedAutoCloseable {
   /**
    * Stop datanode manager.
    */
-  void stop() throws Exception;
+  void stop();
 
   /**
    * Get datanode store.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java
index 072185a766b..2e1e0386aab 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/WitnessedContainerMetadataStoreImpl.java
@@ -23,8 +23,10 @@
 import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.utils.db.CodecException;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 
@@ -57,13 +59,14 @@ public static WitnessedContainerMetadataStore get(ConfigurationSource conf)
     }
   }
 
-  private WitnessedContainerMetadataStoreImpl(ConfigurationSource config, boolean openReadOnly) throws IOException {
+  private WitnessedContainerMetadataStoreImpl(ConfigurationSource config, boolean openReadOnly)
+      throws RocksDatabaseException, CodecException {
     super(WitnessedContainerDBDefinition.get(), config, openReadOnly);
   }
 
   @Override
   protected DBStore initDBStore(DBStoreBuilder dbStoreBuilder, ManagedDBOptions options, ConfigurationSource config)
-      throws IOException {
+      throws RocksDatabaseException, CodecException {
     final DBStore dbStore = dbStoreBuilder.build();
     this.containerIdsTable = this.getDbDef().getContainerIdsTable().getTable(dbStore);
     return dbStore;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java
index 3d0c2fe196c..d5c61827258 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java
@@ -19,7 +19,6 @@
 
 import static org.apache.ratis.util.JavaUtils.getClassSimpleName;
 
-import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -72,11 +71,12 @@ public DBColumnFamilyDefinition(String tableName, Codec<KEY> keyCodec, Codec<VAL
         DBColumnFamilyDefinition::getName);
   }
 
-  public TypedTable<KEY, VALUE> getTable(DBStore db) throws IOException {
+  public TypedTable<KEY, VALUE> getTable(DBStore db) throws RocksDatabaseException, CodecException {
     return db.getTable(tableName, keyCodec, valueCodec);
   }
 
-  public TypedTable<KEY, VALUE> getTable(DBStore db, CacheType cacheType) throws IOException {
+  public TypedTable<KEY, VALUE> getTable(DBStore db, CacheType cacheType)
+      throws RocksDatabaseException, CodecException {
     return db.getTable(tableName, keyCodec, valueCodec, cacheType);
   }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
index e33ef15898f..0fb91f42d90 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
@@ -17,16 +17,15 @@
 
 package org.apache.hadoop.hdds.utils.db;
 
-import java.io.Closeable;
 import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.hdds.utils.db.cache.TableCache;
 import org.apache.hadoop.hdds.utils.db.cache.TableCache.CacheType;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedCompactRangeOptions;
 import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
+import org.apache.ratis.util.UncheckedAutoCloseable;
 
 /**
  * The DBStore interface provides the ability to create Tables, which store
@@ -36,20 +35,19 @@
  *
  */
 @InterfaceStability.Evolving
-public interface DBStore extends Closeable, BatchOperationHandler {
+public interface DBStore extends UncheckedAutoCloseable, BatchOperationHandler {
 
   /**
    * Gets an existing TableStore.
    *
    * @param name - Name of the TableStore to get
    * @return - TableStore.
-   * @throws IOException on Failure
    */
-  Table<byte[], byte[]> getTable(String name) throws IOException;
+  Table<byte[], byte[]> getTable(String name) throws RocksDatabaseException;
 
  /** The same as getTable(name, keyCodec, valueCodec, CacheType.PARTIAL_CACHE). */
  default <KEY, VALUE> TypedTable<KEY, VALUE> getTable(String name, Codec<KEY> keyCodec, Codec<VALUE> valueCodec)
-      throws IOException {
+      throws RocksDatabaseException, CodecException {
     return getTable(name, keyCodec, valueCodec, CacheType.PARTIAL_CACHE);
   }
 
@@ -61,31 +59,29 @@ default <KEY, VALUE> TypedTable<KEY, VALUE> getTable(String name, Codec<KEY> key
    * @param valueCodec - value codec
    * @param cacheType - cache type
    * @return - Table Store
-   * @throws IOException
    */
   <KEY, VALUE> TypedTable<KEY, VALUE> getTable(
-      String name, Codec<KEY> keyCodec, Codec<VALUE> valueCodec, TableCache.CacheType cacheType) throws IOException;
+      String name, Codec<KEY> keyCodec, Codec<VALUE> valueCodec, TableCache.CacheType cacheType)
+      throws RocksDatabaseException, CodecException;
 
   /**
    * Lists the Known list of Tables in a DB.
    *
    * @return List of Tables, in case of Rocks DB and LevelDB we will return at
    * least one entry called DEFAULT.
-   * @throws IOException on Failure
    */
-  ArrayList<Table> listTables() throws IOException;
+  List<Table<?, ?>> listTables();
 
   /**
    * Flush the DB buffer onto persistent storage.
-   * @throws IOException
    */
-  void flushDB() throws IOException;
+  void flushDB() throws RocksDatabaseException;
 
   /**
    * Flush the outstanding I/O operations of the DB.
    * @param sync if true will sync the outstanding I/Os to the disk.
    */
-  void flushLog(boolean sync) throws IOException;
+  void flushLog(boolean sync) throws RocksDatabaseException;
 
   /**
    * Returns the RocksDB checkpoint differ.
@@ -94,76 +90,30 @@ <KEY, VALUE> TypedTable<KEY, VALUE> getTable(
 
   /**
    * Compact the entire database.
-   *
-   * @throws IOException on Failure
    */
-  void compactDB() throws IOException;
+  void compactDB() throws RocksDatabaseException;
 
   /**
    * Compact the specific table.
    *
    * @param tableName - Name of the table to compact.
-   * @throws IOException on Failure
    */
-  void compactTable(String tableName) throws IOException;
+  void compactTable(String tableName) throws RocksDatabaseException;
 
   /**
    * Compact the specific table.
    *
    * @param tableName - Name of the table to compact.
    * @param options - Options for the compact operation.
-   * @throws IOException on Failure
-   */
-  void compactTable(String tableName, ManagedCompactRangeOptions options) throws IOException;
-
-  /**
-   * Moves a key from the Source Table to the destination Table.
-   *
-   * @param key - Key to move.
-   * @param source - Source Table.
-   * @param dest - Destination Table.
-   * @throws IOException on Failure
-   */
-  <KEY, VALUE> void move(KEY key, Table<KEY, VALUE> source,
-                         Table<KEY, VALUE> dest) throws IOException;
-
-  /**
-   * Moves a key from the Source Table to the destination Table and updates the
-   * destination to the new value.
-   *
-   * @param key - Key to move.
-   * @param value - new value to write to the destination table.
-   * @param source - Source Table.
-   * @param dest - Destination Table.
-   * @throws IOException on Failure
-   */
-  <KEY, VALUE> void move(KEY key, VALUE value, Table<KEY, VALUE> source,
-                         Table<KEY, VALUE> dest)
-      throws IOException;
-
-  /**
-   * Moves a key from the Source Table to the destination Table and updates the
-   * destination with the new key name and value.
-   * This is similar to deleting an entry in one table and adding an entry in
-   * another table, here it is done atomically.
-   *
-   * @param sourceKey - Key to move.
-   * @param destKey - Destination key name.
-   * @param value - new value to write to the destination table.
-   * @param source - Source Table.
-   * @param dest - Destination Table.
-   * @throws IOException on Failure
    */
-  <KEY, VALUE> void move(KEY sourceKey, KEY destKey, VALUE value,
-                         Table<KEY, VALUE> source, Table<KEY, VALUE> dest)
-      throws IOException;
+  void compactTable(String tableName, ManagedCompactRangeOptions options) throws RocksDatabaseException;
 
   /**
    * Returns an estimated count of keys in this DB.
    *
    * @return long, estimate of keys in the DB.
    */
-  long getEstimatedKeyCount() throws IOException;
+  long getEstimatedKeyCount() throws RocksDatabaseException;
 
 
   /**
@@ -172,7 +122,7 @@ <KEY, VALUE> void move(KEY sourceKey, KEY destKey, VALUE value,
    * @return An object that encapsulates the checkpoint information along with
    * location.
    */
-  DBCheckpoint getCheckpoint(boolean flush) throws IOException;
+  DBCheckpoint getCheckpoint(boolean flush) throws RocksDatabaseException;
 
   /**
    * Get current snapshot of DB store as an artifact stored on
@@ -180,7 +130,7 @@ <KEY, VALUE> void move(KEY sourceKey, KEY destKey, VALUE value,
    * @return An object that encapsulates the checkpoint information along with
    * location.
    */
-  DBCheckpoint getCheckpoint(String parentDir, boolean flush) throws IOException;
+  DBCheckpoint getCheckpoint(String parentDir, boolean flush) throws RocksDatabaseException;
 
   /**
    * Get DB Store location.
@@ -199,13 +149,13 @@ <KEY, VALUE> void move(KEY sourceKey, KEY destKey, VALUE value,
    * Get data written to DB since a specific sequence number.
    */
   DBUpdatesWrapper getUpdatesSince(long sequenceNumber)
-      throws IOException;
+      throws SequenceNumberNotFoundException;
 
   /**
    * Get limited data written to DB since a specific sequence number.
    */
   DBUpdatesWrapper getUpdatesSince(long sequenceNumber, long limitCount)
-      throws IOException;
+      throws SequenceNumberNotFoundException;
 
   /**
    * Return if the underlying DB is closed. This call is thread safe.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 45ed81c8772..a561023f530 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -30,7 +30,6 @@
 
 import com.google.common.base.Preconditions;
 import java.io.File;
-import java.io.IOException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
@@ -107,8 +106,8 @@ public final class DBStoreBuilder {
   /**
    * Create DBStoreBuilder from a generic DBDefinition.
    */
-  public static DBStore createDBStore(ConfigurationSource configuration,
-      DBDefinition definition) throws IOException {
+  public static DBStore createDBStore(ConfigurationSource configuration, DBDefinition definition)
+      throws RocksDatabaseException {
     return newBuilder(configuration, definition, null, null).build();
   }
 
@@ -200,10 +199,10 @@ private void setDBOptionsProps(ManagedDBOptions dbOptions) {
    *
    * @return DBStore
    */
-  public RDBStore build() throws IOException {
+  public RDBStore build() throws RocksDatabaseException {
     if (StringUtils.isBlank(dbname) || (dbPath == null)) {
       LOG.error("Required Parameter missing.");
-      throw new IOException("Required parameter is missing. Please make sure "
+      throw new RocksDatabaseException("Required parameter is missing. Please 
make sure "
           + "Path and DB name is provided.");
     }
 
@@ -219,7 +218,7 @@ public RDBStore build() throws IOException {
 
       File dbFile = getDBFile();
       if (!dbFile.getParentFile().exists()) {
-        throw new IOException("The DB destination directory should exist.");
+        throw new RocksDatabaseException("The DB destination directory should 
exist.");
       }
 
       return new RDBStore(dbFile, rocksDBOption, statistics, writeOptions, tableConfigs,
@@ -432,7 +431,7 @@ private ManagedDBOptions getDBOptionsFromFile(
           if (option != null) {
             LOG.info("Using RocksDB DBOptions from {}.ini file", dbname);
           }
-        } catch (IOException ex) {
+        } catch (RocksDatabaseException ex) {
           LOG.info("Unable to read RocksDB DBOptions from {}", dbname, ex);
         } finally {
           columnFamilyDescriptors.forEach(d -> d.getOptions().close());
@@ -443,15 +442,15 @@ private ManagedDBOptions getDBOptionsFromFile(
     return option;
   }
 
-  private File getDBFile() throws IOException {
+  private File getDBFile() throws RocksDatabaseException {
     if (dbPath == null) {
       LOG.error("DB path is required.");
-      throw new IOException("A Path to for DB file is needed.");
+      throw new RocksDatabaseException("A Path to for DB file is needed.");
     }
 
     if (StringUtils.isBlank(dbname)) {
       LOG.error("DBName is a required.");
-      throw new IOException("A valid DB name is required.");
+      throw new RocksDatabaseException("A valid DB name is required.");
     }
     return Paths.get(dbPath.toString(), dbname).toFile();
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 6fafee9c2bb..e3853a84211 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -34,6 +34,7 @@
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -83,8 +84,7 @@ public class RDBStore implements DBStore {
                   boolean createCheckpointDirs,
                   ConfigurationSource configuration,
                   boolean enableRocksDBMetrics)
-
-      throws IOException {
+      throws RocksDatabaseException {
     Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
     Preconditions.checkNotNull(families);
     Preconditions.checkArgument(!families.isEmpty());
@@ -93,7 +93,6 @@ public class RDBStore implements DBStore {
     this.dbOptions = dbOptions;
     this.statistics = statistics;
 
-    Exception exception = null;
     try {
       if (enableCompactionDag) {
         rocksDBCheckpointDiffer = RocksDBCheckpointDifferHolder.getInstance(
@@ -169,21 +168,13 @@ public class RDBStore implements DBStore {
       //Initialize checkpoint manager
       checkPointManager = new RDBCheckpointManager(db, dbLocation.getName());
       rdbMetrics = RDBMetrics.create();
-
-    } catch (RuntimeException e) {
-      exception = e;
-      throw new IllegalStateException("Failed to create RDBStore from " + 
dbFile, e);
-    } catch (Exception e) {
-      exception = e;
-      throw new IOException("Failed to create RDBStore from " + dbFile, e);
-    } finally {
-      if (exception != null) {
-        try {
-          close();
-        } catch (IOException e) {
-          exception.addSuppressed(e);
-        }
+    } catch (IOException | RuntimeException e) {
+      try {
+        close();
+      } catch (Exception suppressed) {
+        e.addSuppressed(suppressed);
       }
+      throw new RocksDatabaseException("Failed to create RDBStore from " + 
dbFile, e);
     }
 
     if (LOG.isDebugEnabled()) {
@@ -215,7 +206,7 @@ public ManagedDBOptions getDbOptions() {
   }
 
   @Override
-  public void compactDB() throws IOException {
+  public void compactDB() throws RocksDatabaseException {
     try (ManagedCompactRangeOptions options =
              new ManagedCompactRangeOptions()) {
       db.compactDB(options);
@@ -223,23 +214,23 @@ public void compactDB() throws IOException {
   }
 
   @Override
-  public void compactTable(String tableName) throws IOException {
+  public void compactTable(String tableName) throws RocksDatabaseException {
     try (ManagedCompactRangeOptions options = new ManagedCompactRangeOptions()) {
       compactTable(tableName, options);
     }
   }
 
   @Override
-  public void compactTable(String tableName, ManagedCompactRangeOptions options) throws IOException {
+  public void compactTable(String tableName, ManagedCompactRangeOptions options) throws RocksDatabaseException {
     RocksDatabase.ColumnFamily columnFamily = db.getColumnFamily(tableName);
     if (columnFamily == null) {
-      throw new IOException("Table not found: " + tableName);
+      throw new RocksDatabaseException("Table not found: " + tableName);
     }
     db.compactRange(columnFamily, null, null, options);
   }
 
   @Override
-  public void close() throws IOException {
+  public void close() {
     if (metrics != null) {
       metrics.unregister();
       metrics = null;
@@ -258,36 +249,7 @@ public void close() throws IOException {
   }
 
   @Override
-  public <K, V> void move(K key, Table<K, V> source,
-                                Table<K, V> dest) throws IOException {
-    try (BatchOperation batchOperation = initBatchOperation()) {
-
-      V value = source.get(key);
-      dest.putWithBatch(batchOperation, key, value);
-      source.deleteWithBatch(batchOperation, key);
-      commitBatchOperation(batchOperation);
-    }
-  }
-
-  @Override
-  public <K, V> void move(K key, V value, Table<K, V> source,
-                                Table<K, V> dest) throws IOException {
-    move(key, key, value, source, dest);
-  }
-
-  @Override
-  public <K, V> void move(K sourceKey, K destKey, V value,
-                                Table<K, V> source,
-                                Table<K, V> dest) throws IOException {
-    try (BatchOperation batchOperation = initBatchOperation()) {
-      dest.putWithBatch(batchOperation, destKey, value);
-      source.deleteWithBatch(batchOperation, sourceKey);
-      commitBatchOperation(batchOperation);
-    }
-  }
-
-  @Override
-  public long getEstimatedKeyCount() throws IOException {
+  public long getEstimatedKeyCount() throws RocksDatabaseException {
     return db.estimateNumKeys();
   }
 
@@ -298,7 +260,7 @@ public BatchOperation initBatchOperation() {
 
   @Override
   public void commitBatchOperation(BatchOperation operation)
-      throws IOException {
+      throws RocksDatabaseException {
     ((RDBBatchOperation) operation).commit(db);
   }
 
@@ -319,8 +281,8 @@ public <K, V> TypedTable<K, V> getTable(
   }
 
   @Override
-  public ArrayList<Table> listTables() {
-    ArrayList<Table> returnList = new ArrayList<>();
+  public List<Table<?, ?>> listTables() {
+    final List<Table<?, ?>> returnList = new ArrayList<>();
     for (ColumnFamily family : getColumnFamilies()) {
       returnList.add(new RDBTable(db, family, rdbMetrics));
     }
@@ -328,19 +290,19 @@ public ArrayList<Table> listTables() {
   }
 
   @Override
-  public void flushDB() throws IOException {
+  public void flushDB() throws RocksDatabaseException {
     db.flush();
   }
 
   @Override
-  public void flushLog(boolean sync) throws IOException {
+  public void flushLog(boolean sync) throws RocksDatabaseException {
     // for RocksDB it is sufficient to flush the WAL as entire db can
     // be reconstructed using it.
     db.flushWal(sync);
   }
 
   @Override
-  public DBCheckpoint getCheckpoint(boolean flush) throws IOException {
+  public DBCheckpoint getCheckpoint(boolean flush) throws RocksDatabaseException {
     if (flush) {
       this.flushDB();
     }
@@ -348,14 +310,14 @@ public DBCheckpoint getCheckpoint(boolean flush) throws IOException {
   }
 
   @Override
-  public DBCheckpoint getCheckpoint(String parentPath, boolean flush) throws IOException {
+  public DBCheckpoint getCheckpoint(String parentPath, boolean flush) throws RocksDatabaseException {
     if (flush) {
       this.flushDB();
     }
     return checkPointManager.createCheckpoint(parentPath, null);
   }
 
-  public DBCheckpoint getSnapshot(String name) throws IOException {
+  public DBCheckpoint getSnapshot(String name) throws RocksDatabaseException {
     this.flushLog(true);
     return checkPointManager.createCheckpoint(snapshotsParentDir, name);
   }
@@ -376,13 +338,13 @@ public Collection<ColumnFamily> getColumnFamilies() {
 
   @Override
   public DBUpdatesWrapper getUpdatesSince(long sequenceNumber)
-      throws IOException {
+      throws SequenceNumberNotFoundException {
     return getUpdatesSince(sequenceNumber, Long.MAX_VALUE);
   }
 
   @Override
   public DBUpdatesWrapper getUpdatesSince(long sequenceNumber, long limitCount)
-      throws IOException {
+      throws SequenceNumberNotFoundException {
     if (limitCount <= 0) {
       throw new IllegalArgumentException("Illegal count for getUpdatesSince.");
     }
@@ -451,7 +413,7 @@ public DBUpdatesWrapper getUpdatesSince(long sequenceNumber, long limitCount)
       // Throw the exception back to Recon. Expect Recon to fall back to
       // full snapshot.
       throw e;
-    } catch (RocksDBException | IOException e) {
+    } catch (RocksDBException | RocksDatabaseException e) {
       LOG.error("Unable to get delta updates since sequenceNumber {}. "
               + "This exception will not be thrown to the client ",
           sequenceNumber, e);
@@ -479,12 +441,12 @@ public RocksDatabase getDb() {
     return db;
   }
 
-  public String getProperty(String property) throws IOException {
+  public String getProperty(String property) throws RocksDatabaseException {
     return db.getProperty(property);
   }
 
   public String getProperty(ColumnFamily family, String property)
-      throws IOException {
+      throws RocksDatabaseException {
     return db.getProperty(family, property);
   }
 
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index 58ce5ba2ca2..44da33f9921 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -23,7 +23,6 @@
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
@@ -180,47 +179,6 @@ public void closeUnderlyingDB() throws Exception {
     assertTrue(rdbStore.isClosed());
   }
 
-  @Test
-  public void moveKey() throws Exception {
-    byte[] key =
-        RandomStringUtils.secure().next(10).getBytes(StandardCharsets.UTF_8);
-    byte[] value =
-        RandomStringUtils.secure().next(10).getBytes(StandardCharsets.UTF_8);
-
-    final Table<byte[], byte[]> firstTable = rdbStore.getTable(families.get(1));
-    firstTable.put(key, value);
-    final Table<byte[], byte[]> secondTable = rdbStore.getTable(families.get(2));
-    rdbStore.move(key, firstTable, secondTable);
-    byte[] newvalue = secondTable.get(key);
-    // Make sure we have value in the second table
-    assertNotNull(newvalue);
-    //and it is same as what we wrote to the FirstTable
-    assertArrayEquals(value, newvalue);
-    // After move this key must not exist in the first table.
-    assertNull(firstTable.get(key));
-  }
-
-  @Test
-  public void moveWithValue() throws Exception {
-    byte[] key =
-        RandomStringUtils.secure().next(10).getBytes(StandardCharsets.UTF_8);
-    byte[] value =
-        RandomStringUtils.secure().next(10).getBytes(StandardCharsets.UTF_8);
-
-    byte[] nextValue =
-        RandomStringUtils.secure().next(10).getBytes(StandardCharsets.UTF_8);
-    Table<byte[], byte[]> firstTable = rdbStore.getTable(families.get(1));
-    firstTable.put(key, value);
-    Table<byte[], byte[]> secondTable = rdbStore.getTable(families.get(2));
-    rdbStore.move(key, nextValue, firstTable, secondTable);
-    byte[] newvalue = secondTable.get(key);
-    // Make sure we have value in the second table
-    assertNotNull(newvalue);
-    //and it is not same as what we wrote to the FirstTable, and equals
-    // the new value.
-    assertArrayEquals(nextValue, newvalue);
-  }
-
   @Test
   public void getEstimatedKeyCount() throws Exception {
     assertNotNull(rdbStore, "DB Store cannot be null");
@@ -248,7 +206,7 @@ public void getTable() throws Exception {
 
   @Test
   public void listTables() throws Exception {
-    List<Table> tableList = rdbStore.listTables();
+    final List<Table<?, ?>> tableList = rdbStore.listTables();
     assertNotNull(tableList, "Table list cannot be null");
     Map<String, Table> hashTable = new HashMap<>();
 
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabaseException.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabaseException.java
index e7906879b4f..33cf3458d00 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabaseException.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabaseException.java
@@ -28,9 +28,13 @@ private static String getStatus(RocksDBException e) {
     return e.getStatus() == null ? "NULL_STATUS" : e.getStatus().getCodeString();
   }
 
-  /** Construct from the given {@link RocksDBException} cause. */
-  public RocksDatabaseException(String message, RocksDBException cause) {
-    super(getStatus(cause) + ": " + message, cause);
+  private static String getMessage(String message, Exception cause) {
+    return cause instanceof RocksDBException ? getStatus((RocksDBException) cause) + ": " + message : message;
+  }
+
+  /** Construct from the given cause. */
+  public RocksDatabaseException(String message, Exception cause) {
+    super(getMessage(message, cause), cause);
   }
 
   public RocksDatabaseException(String message) {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java
index 3018f396d74..6fbc09eb89c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java
@@ -38,6 +38,7 @@
 import com.google.common.cache.CacheLoader;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeoutException;
@@ -100,7 +101,7 @@ static void beforeAll() throws Exception {
           when(table1.getName()).thenReturn("table1");
           when(table2.getName()).thenReturn("table2");
           when(keyTable.getName()).thenReturn("keyTable"); // This is in 
COLUMN_FAMILIES_TO_TRACK_IN_DAG
-          ArrayList tables = new ArrayList();
+          final List<Table<?, ?>> tables = new ArrayList<>();
           tables.add(table1);
           tables.add(table2);
           tables.add(keyTable);


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@ozone.apache.org
For additional commands, e-mail: commits-h...@ozone.apache.org

