This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 2d0f8cb6af HDDS-12833. Remove the CodecRegistry field from DBStoreBuilder (#8327)
2d0f8cb6af is described below

commit 2d0f8cb6afda01a9040c087e7c09bbb4ea701c63
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Tue Apr 29 08:24:04 2025 -0700

    HDDS-12833. Remove the CodecRegistry field from DBStoreBuilder (#8327)
---
 .../ozone/container/metadata/AbstractRDBStore.java |   2 +-
 .../java/org/apache/hadoop/hdds/utils/HAUtils.java |  28 +-----
 .../hadoop/hdds/utils/db/DBStoreBuilder.java       |  58 +++++-------
 .../org/apache/hadoop/hdds/utils/db/RDBStore.java  |   4 +-
 .../hadoop/hdds/utils/db/TestDBStoreBuilder.java   |  17 +---
 .../apache/hadoop/hdds/utils/db/TestRDBStore.java  |   2 +-
 .../hadoop/hdds/scm/TestSCMInstallSnapshot.java    |   4 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java     | 105 ++++++---------------
 .../recon/recovery/ReconOmMetadataManagerImpl.java |   9 +-
 .../scm/ReconStorageContainerManagerFacade.java    |  21 +----
 .../freon/containergenerator/GeneratorOm.java      |  14 +--
 11 files changed, 68 insertions(+), 196 deletions(-)
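
The core of the change: DBStoreBuilder no longer carries a CodecRegistry of
its own; column family names and their key/value codecs are taken from the
DBDefinition handed to newBuilder. A minimal caller-side sketch, using only
the builder signatures added in this diff (the wrapper class and method
names here are illustrative):

    import java.io.IOException;
    import java.nio.file.Path;
    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.utils.db.DBDefinition;
    import org.apache.hadoop.hdds.utils.db.DBStore;
    import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;

    public final class OpenStoreExample {
      private OpenStoreExample() { }

      // Tables and codecs come from the definition's column families,
      // so no addTable/addCodec calls are needed on the builder.
      static DBStore open(ConfigurationSource conf, DBDefinition definition,
          Path metadataDir) throws IOException {
        // A null name falls back to definition.getName().
        return DBStoreBuilder.newBuilder(conf, definition, null, metadataDir)
            .build();
      }
    }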

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java
index 16052ce7cd..0033b62a89 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractRDBStore.java
@@ -72,7 +72,7 @@ public void start(ConfigurationSource config)
       options.setInfoLogLevel(level);
       options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize());
       options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum());
-      this.store = initDBStore(DBStoreBuilder.newBuilder(config, dbDef)
+      this.store = initDBStore(DBStoreBuilder.newBuilder(config, dbDef, null, null)
           .setDBOptions(options)
           .setDefaultCFOptions(cfOptions)
           .setOpenReadOnly(openReadOnly), options, config);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
index e2db36558b..9c3b4fefe0 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java
@@ -32,7 +32,6 @@
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.security.cert.X509Certificate;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -59,11 +58,9 @@
 import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
 import org.apache.hadoop.hdds.utils.db.DBDefinition;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
-import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
@@ -250,9 +247,7 @@ private static TransactionInfo getTransactionInfoFromDB(
       DBDefinition definition)
       throws IOException {
 
-    try (DBStore dbStore = loadDB(tempConfig, dbDir.toFile(),
-        dbName, definition)) {
-
+    try (DBStore dbStore = DBStoreBuilder.newBuilder(tempConfig, definition, dbName, dbDir).build()) {
       // Get the table name with TransactionInfo as the value. The transaction
      // info table names are different in SCM and OM.
 
@@ -307,27 +302,6 @@ public static boolean verifyTransactionInfo(TransactionInfo transactionInfo,
     return true;
   }
 
-  public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
-      String dbName, DBDefinition definition) throws IOException {
-    RocksDBConfiguration rocksDBConfiguration =
-        configuration.getObject(RocksDBConfiguration.class);
-    DBStoreBuilder dbStoreBuilder =
-        DBStoreBuilder.newBuilder(configuration, rocksDBConfiguration)
-            .setName(dbName)
-            .setPath(Paths.get(metaDir.getPath()));
-    // Add column family names and codecs.
-    for (DBColumnFamilyDefinition columnFamily : definition
-        .getColumnFamilies()) {
-
-      dbStoreBuilder.addTable(columnFamily.getName());
-      dbStoreBuilder
-          .addCodec(columnFamily.getKeyType(), columnFamily.getKeyCodec());
-      dbStoreBuilder
-          .addCodec(columnFamily.getValueType(), columnFamily.getValueCodec());
-    }
-    return dbStoreBuilder.build();
-  }
-
   public static File getMetaDir(DBDefinition definition,
       OzoneConfiguration configuration) {
     // Set metadata dirs.
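
With HAUtils.loadDB gone, its callers build the store directly, as the hunk
above shows. A sketch of a migrated call site (argument names are
illustrative; only signatures visible in this diff are used):

    import java.io.IOException;
    import java.nio.file.Path;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.utils.db.DBDefinition;
    import org.apache.hadoop.hdds.utils.db.DBStore;
    import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;

    final class LoadDBMigration {
      private LoadDBMigration() { }

      static void readFrom(OzoneConfiguration conf, DBDefinition definition,
          String dbName, Path dbDir) throws IOException {
        // One builder call replaces the removed loop that registered each
        // column family's name, key codec, and value codec by hand.
        try (DBStore dbStore =
            DBStoreBuilder.newBuilder(conf, definition, dbName, dbDir).build()) {
          // Tables are still looked up by name, e.g. dbStore.getTable(...).
        }
      }
    }
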
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index abb806bb1a..076b6bc933 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -29,7 +29,6 @@
 import static org.rocksdb.RocksDB.DEFAULT_COLUMN_FAMILY;
 
 import com.google.common.base.Preconditions;
-import com.google.protobuf.MessageLite;
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Path;
@@ -91,7 +90,6 @@ public final class DBStoreBuilder {
   // any options. On build, this will be replaced with defaultCfOptions.
   private Map<String, ManagedColumnFamilyOptions> cfOptions;
   private ConfigurationSource configuration;
-  private final CodecRegistry.Builder registry = CodecRegistry.newBuilder();
   private String rocksDbStat;
   // RocksDB column family write buffer size
   private long rocksDbCfWriteBufferSize;
@@ -112,28 +110,23 @@ public final class DBStoreBuilder {
    */
   public static DBStore createDBStore(ConfigurationSource configuration,
       DBDefinition definition) throws IOException {
-    return newBuilder(configuration, definition).build();
+    return newBuilder(configuration, definition, null, null).build();
   }
 
-  public static DBStoreBuilder newBuilder(ConfigurationSource configuration,
-      DBDefinition definition) {
-
-    DBStoreBuilder builder = newBuilder(configuration);
-    builder.applyDBDefinition(definition);
+  public static DBStoreBuilder newBuilder(ConfigurationSource conf, DBDefinition definition, File dbDir) {
+    return newBuilder(conf, definition, dbDir.getName(), dbDir.getParentFile().toPath());
+  }
 
-    return builder;
+  public static DBStoreBuilder newBuilder(ConfigurationSource conf, DBDefinition definition,
+      String name, Path metadataDir) {
+    return newBuilder(conf).apply(definition, name, metadataDir);
   }
 
   public static DBStoreBuilder newBuilder(ConfigurationSource configuration) {
-    return newBuilder(configuration,
+    return new DBStoreBuilder(configuration,
         configuration.getObject(RocksDBConfiguration.class));
   }
 
-  public static DBStoreBuilder newBuilder(ConfigurationSource configuration,
-      RocksDBConfiguration rocksDBConfiguration) {
-    return new DBStoreBuilder(configuration, rocksDBConfiguration);
-  }
-
   private DBStoreBuilder(ConfigurationSource configuration,
       RocksDBConfiguration rocksDBConfiguration) {
     cfOptions = new HashMap<>();
@@ -173,21 +166,23 @@ public static File getDBDirPath(DBDefinition definition,
     return metadataDir;
   }
 
-  private void applyDBDefinition(DBDefinition definition) {
-    // Set metadata dirs.
-    File metadataDir = getDBDirPath(definition, configuration);
+  private DBStoreBuilder apply(DBDefinition definition, String name, Path metadataDir) {
+    if (name == null) {
+      name = definition.getName();
+    }
+    setName(name);
 
-    setName(definition.getName());
-    setPath(Paths.get(metadataDir.getPath()));
+    // Set metadata dirs.
+    if (metadataDir == null) {
+      metadataDir = getDBDirPath(definition, configuration).toPath();
+    }
+    setPath(metadataDir);
 
     // Add column family names and codecs.
-    for (DBColumnFamilyDefinition columnFamily :
-        definition.getColumnFamilies()) {
-
+    for (DBColumnFamilyDefinition<?, ?> columnFamily : definition.getColumnFamilies()) {
       addTable(columnFamily.getName(), columnFamily.getCfOptions());
-      addCodec(columnFamily.getKeyType(), columnFamily.getKeyCodec());
-      addCodec(columnFamily.getValueType(), columnFamily.getValueCodec());
     }
+    return this;
   }
 
   private void setDBOptionsProps(ManagedDBOptions dbOptions) {
@@ -206,7 +201,7 @@ private void setDBOptionsProps(ManagedDBOptions dbOptions) {
    *
    * @return DBStore
    */
-  public DBStore build() throws IOException {
+  public RDBStore build() throws IOException {
     if (StringUtil.isBlank(dbname) || (dbPath == null)) {
       LOG.error("Required Parameter missing.");
       throw new IOException("Required parameter is missing. Please make sure "
@@ -229,7 +224,7 @@ public DBStore build() throws IOException {
       }
 
      return new RDBStore(dbFile, rocksDBOption, statistics, writeOptions, tableConfigs,
-          registry.build(), openReadOnly, dbJmxBeanNameName, enableCompactionDag,
+          openReadOnly, dbJmxBeanNameName, enableCompactionDag,
           maxDbUpdatesSizeThreshold, createCheckpointDirs, configuration,
           enableRocksDbMetrics);
     } finally {
@@ -257,15 +252,6 @@ public DBStoreBuilder addTable(String tableName,
     return this;
   }
 
-  public <T> DBStoreBuilder addCodec(Class<T> type, Codec<T> codec) {
-    registry.addCodec(type, codec);
-    return this;
-  }
-
-  public <T extends MessageLite> DBStoreBuilder addProto2Codec(T type) {
-    return addCodec((Class<T>)type.getClass(), Proto2Codec.get(type));
-  }
-
   public DBStoreBuilder setDBOptions(ManagedDBOptions option) {
     rocksDBOption = option;
     return this;
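
Taken together, the builder now has three public entry points:
newBuilder(conf) with explicit setName/setPath, newBuilder(conf, definition,
dbDir) for an existing db directory, and newBuilder(conf, definition, name,
metadataDir) where name and metadataDir may be null. Note that build() now
returns the concrete RDBStore, so callers can hold the subtype directly (the
test changes below rely on this). A usage sketch under those assumptions:

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Path;
    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.utils.db.DBDefinition;
    import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
    import org.apache.hadoop.hdds.utils.db.RDBStore;

    final class BuilderEntryPoints {
      private BuilderEntryPoints() { }

      // Name and metadata dir are derived from the db directory itself.
      static RDBStore openExisting(ConfigurationSource conf,
          DBDefinition definition, File dbDir) throws IOException {
        return DBStoreBuilder.newBuilder(conf, definition, dbDir).build();
      }

      // Either name or metadataDir may be null, falling back to
      // definition.getName() and the configured DB location.
      static RDBStore openNamed(ConfigurationSource conf,
          DBDefinition definition, String name, Path metadataDir)
          throws IOException {
        return DBStoreBuilder.newBuilder(conf, definition, name, metadataDir)
            .build();
      }
    }
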
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 491f8ff541..b0096730d0 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -75,9 +75,9 @@ public class RDBStore implements DBStore {
   private final ManagedStatistics statistics;
 
   @SuppressWarnings("parameternumber")
-  public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics statistics,
+  RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics statistics,
                   ManagedWriteOptions writeOptions, Set<TableConfig> families,
-                  CodecRegistry registry, boolean readOnly,
+                  boolean readOnly,
                   String dbJmxBeanName, boolean enableCompactionDag,
                   long maxDbUpdatesSizeThreshold,
                   boolean createCheckpointDirs,
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
index fb499754e3..6615485383 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
@@ -19,7 +19,6 @@
 
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -203,11 +202,7 @@ public File getDBLocation(ConfigurationSource conf) {
       }
     };
 
-    try (DBStore dbStore = DBStoreBuilder.newBuilder(conf, sampleDB)
-        .setName("SampleStore").setPath(newFolder.toPath()).build()) {
-      assertInstanceOf(RDBStore.class, dbStore);
-
-      RDBStore rdbStore = (RDBStore) dbStore;
+    try (RDBStore rdbStore = DBStoreBuilder.newBuilder(conf, sampleDB, "SampleStore", newFolder.toPath()).build()) {
       Collection<RocksDatabase.ColumnFamily> cfFamilies =
           rdbStore.getColumnFamilies();
 
@@ -267,13 +262,9 @@ public File getDBLocation(ConfigurationSource conf) {
       }
     };
 
-    try (DBStore dbStore = DBStoreBuilder.newBuilder(conf, sampleDB)
-            .setName("SampleStore")
-            .disableDefaultCFAutoCompaction(disableAutoCompaction)
-            .setPath(newFolder.toPath()).build()) {
-      assertInstanceOf(RDBStore.class, dbStore);
-
-      RDBStore rdbStore = (RDBStore) dbStore;
+    try (RDBStore rdbStore = DBStoreBuilder.newBuilder(conf, sampleDB, "SampleStore", newFolder.toPath())
+        .disableDefaultCFAutoCompaction(disableAutoCompaction)
+        .build()) {
       Collection<RocksDatabase.ColumnFamily> cfFamilies =
               rdbStore.getColumnFamilies();
 
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index c95fced12f..567c2a13c0 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -88,7 +88,7 @@ public static RDBStore newRDBStore(File dbFile, ManagedDBOptions options,
       long maxDbUpdatesSizeThreshold)
       throws IOException {
     return new RDBStore(dbFile, options, null, new ManagedWriteOptions(), families,
-        CodecRegistry.newBuilder().build(), false, null, false,
+        false, null, false,
         maxDbUpdatesSizeThreshold, true, null, true);
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
index a917f7453e..53a2f75af2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.junit.jupiter.api.AfterAll;
@@ -130,8 +131,7 @@ public void testInstallCheckPoint() throws Exception {
     assertNotNull(parent);
     Path fileName = location.getFileName();
     assertNotNull(fileName);
-    final DBStore db = HAUtils.loadDB(conf, parent.toFile(),
-        fileName.toString(), SCMDBDefinition.get());
+    final DBStore db = DBStoreBuilder.newBuilder(conf, SCMDBDefinition.get(), location.toFile()).build();
     // Hack the transaction index in the checkpoint so as to ensure the
     // checkpointed transaction index is higher than when it was downloaded
     // from.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 219fa080c6..8a7b446c01 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -73,7 +73,6 @@
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils;
-import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
@@ -86,7 +85,6 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
-import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -372,8 +370,14 @@ private OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name)
     omEpoch = 0;
     int maxOpenFiles = conf.getInt(OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES, OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES_DEFAULT);
 
-    setStore(loadDB(conf, dir, name, true, Optional.of(Boolean.TRUE),
-        maxOpenFiles, false, false, true));
+    this.store = newDBStoreBuilder(conf, name, dir)
+        .setOpenReadOnly(true)
+        .disableDefaultCFAutoCompaction(true)
+        .setMaxNumberOfOpenFiles(maxOpenFiles)
+        .setEnableCompactionDag(false)
+        .setCreateCheckpointDirs(false)
+        .setEnableRocksDbMetrics(true)
+        .build();
     initializeOmTables(CacheType.PARTIAL_CACHE, false);
     perfMetrics = null;
   }
@@ -404,10 +408,18 @@ private OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name)
         // Check if the snapshot directory exists.
         checkSnapshotDirExist(checkpoint);
       }
-      setStore(loadDB(conf, metaDir, dbName, false,
-          java.util.Optional.of(Boolean.TRUE), maxOpenFiles, false, false,
-          conf.getBoolean(OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED,
-              OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED_DEFAULT)));
+      final boolean enableRocksDBMetrics = conf.getBoolean(
+          OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED,
+          OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED_DEFAULT);
+      this.store = newDBStoreBuilder(conf, dbName, metaDir)
+          .setOpenReadOnly(false)
+          .disableDefaultCFAutoCompaction(true)
+          .setMaxNumberOfOpenFiles(maxOpenFiles)
+          .setEnableCompactionDag(false)
+          .setCreateCheckpointDirs(false)
+          .setEnableRocksDbMetrics(enableRocksDBMetrics)
+          .build();
+
       initializeOmTables(CacheType.PARTIAL_CACHE, false);
     } catch (IOException e) {
       stop();
@@ -531,74 +543,17 @@ public void start(OzoneConfiguration configuration) throws IOException {
   }
 
   public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, int maxOpenFiles) throws IOException {
-    return loadDB(configuration, metaDir, OM_DB_NAME, false,
-        java.util.Optional.empty(), maxOpenFiles, true, true, true);
-  }
-
-  @SuppressWarnings("checkstyle:parameternumber")
-  public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
-      String dbName, boolean readOnly,
-      java.util.Optional<Boolean> disableAutoCompaction,
-      int maxOpenFiles,
-      boolean enableCompactionDag,
-      boolean createCheckpointDirs,
-      boolean enableRocksDBMetrics)
-      throws IOException {
-    RocksDBConfiguration rocksDBConfiguration =
-        configuration.getObject(RocksDBConfiguration.class);
-    DBStoreBuilder dbStoreBuilder = DBStoreBuilder.newBuilder(configuration,
-        rocksDBConfiguration).setName(dbName)
-        .setOpenReadOnly(readOnly)
-        .setPath(Paths.get(metaDir.getPath()))
-        .setEnableCompactionDag(enableCompactionDag)
-        .setCreateCheckpointDirs(createCheckpointDirs)
+    return newDBStoreBuilder(configuration, null, metaDir)
+        .setOpenReadOnly(false)
+        .setEnableCompactionDag(true)
+        .setCreateCheckpointDirs(true)
+        .setEnableRocksDbMetrics(true)
         .setMaxNumberOfOpenFiles(maxOpenFiles)
-        .setEnableRocksDbMetrics(enableRocksDBMetrics);
-    disableAutoCompaction.ifPresent(
-            dbStoreBuilder::disableDefaultCFAutoCompaction);
-    return addOMTablesAndCodecs(dbStoreBuilder).build();
-  }
-
-  public static DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) {
-
-    return builder.addTable(USER_TABLE)
-        .addTable(VOLUME_TABLE)
-        .addTable(BUCKET_TABLE)
-        .addTable(KEY_TABLE)
-        .addTable(DELETED_TABLE)
-        .addTable(OPEN_KEY_TABLE)
-        .addTable(MULTIPARTINFO_TABLE)
-        .addTable(DELEGATION_TOKEN_TABLE)
-        .addTable(S3_SECRET_TABLE)
-        .addTable(PREFIX_TABLE)
-        .addTable(DIRECTORY_TABLE)
-        .addTable(FILE_TABLE)
-        .addTable(OPEN_FILE_TABLE)
-        .addTable(DELETED_DIR_TABLE)
-        .addTable(TRANSACTION_INFO_TABLE)
-        .addTable(META_TABLE)
-        .addTable(TENANT_ACCESS_ID_TABLE)
-        .addTable(PRINCIPAL_TO_ACCESS_IDS_TABLE)
-        .addTable(TENANT_STATE_TABLE)
-        .addTable(SNAPSHOT_INFO_TABLE)
-        .addTable(SNAPSHOT_RENAMED_TABLE)
-        .addTable(COMPACTION_LOG_TABLE)
-        .addCodec(OzoneTokenIdentifier.class, TokenIdentifierCodec.get())
-        .addCodec(OmKeyInfo.class, OmKeyInfo.getCodec(true))
-        .addCodec(RepeatedOmKeyInfo.class, RepeatedOmKeyInfo.getCodec(true))
-        .addCodec(OmBucketInfo.class, OmBucketInfo.getCodec())
-        .addCodec(OmVolumeArgs.class, OmVolumeArgs.getCodec())
-        .addProto2Codec(PersistedUserVolumeInfo.getDefaultInstance())
-        .addCodec(OmMultipartKeyInfo.class, OmMultipartKeyInfo.getCodec())
-        .addCodec(S3SecretValue.class, S3SecretValue.getCodec())
-        .addCodec(OmPrefixInfo.class, OmPrefixInfo.getCodec())
-        .addCodec(TransactionInfo.class, TransactionInfo.getCodec())
-        .addCodec(OmDirectoryInfo.class, OmDirectoryInfo.getCodec())
-        .addCodec(OmDBTenantState.class, OmDBTenantState.getCodec())
-        .addCodec(OmDBAccessIdInfo.class, OmDBAccessIdInfo.getCodec())
-        .addCodec(OmDBUserPrincipalInfo.class, OmDBUserPrincipalInfo.getCodec())
-        .addCodec(SnapshotInfo.class, SnapshotInfo.getCodec())
-        .addCodec(CompactionLogEntry.class, CompactionLogEntry.getCodec());
+        .build();
+  }
+
+  private static DBStoreBuilder newDBStoreBuilder(OzoneConfiguration conf, String name, File dir) {
+    return DBStoreBuilder.newBuilder(conf, OMDBDefinition.get(), name, dir.toPath());
   }
 
   /**
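
With every OM table and codec declared once in OMDBDefinition, the former
addOMTablesAndCodecs chain disappears and loadDB collapses into a single
builder chain. A sketch of an equivalent call, mirroring the code above
(conf, metaDir and maxOpenFiles are the caller's existing values):

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.utils.db.DBStore;
    import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
    import org.apache.hadoop.ozone.om.codec.OMDBDefinition;

    final class OmStoreExample {
      private OmStoreExample() { }

      static DBStore openOmDb(OzoneConfiguration conf, File metaDir,
          int maxOpenFiles) throws IOException {
        // A null name falls back to OMDBDefinition's own db name.
        return DBStoreBuilder
            .newBuilder(conf, OMDBDefinition.get(), null, metaDir.toPath())
            .setOpenReadOnly(false)
            .setEnableCompactionDag(true)
            .setCreateCheckpointDirs(true)
            .setEnableRocksDbMetrics(true)
            .setMaxNumberOfOpenFiles(maxOpenFiles)
            .build();
      }
    }
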
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
index a9db3657c4..8daa332137 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.cache.TableCache;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
@@ -92,13 +93,7 @@ public void start(OzoneConfiguration configuration) throws IOException {
    */
   private void initializeNewRdbStore(File dbFile) throws IOException {
     try {
-      DBStoreBuilder dbStoreBuilder =
-          DBStoreBuilder.newBuilder(ozoneConfiguration)
-          .setName(dbFile.getName())
-          .setPath(dbFile.toPath().getParent());
-      addOMTablesAndCodecs(dbStoreBuilder);
-      dbStoreBuilder.addCodec(KeyEntityInfoProtoWrapper.class, KeyEntityInfoProtoWrapper.getCodec());
-      setStore(dbStoreBuilder.build());
+      setStore(DBStoreBuilder.newBuilder(ozoneConfiguration, OMDBDefinition.get(), dbFile).build());
       LOG.info("Created OM DB handle from snapshot at {}.",
           dbFile.getAbsolutePath());
     } catch (IOException ioEx) {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index b6c13505cd..ead10bcb6c 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -105,7 +105,6 @@
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -618,7 +617,8 @@ private void deleteOldSCMDB() throws IOException {
 
   private void initializeNewRdbStore(File dbFile) throws IOException {
     try {
-      final DBStore newStore = createDBAndAddSCMTablesAndCodecs(dbFile, ReconSCMDBDefinition.get());
+      final DBStore newStore = DBStoreBuilder.newBuilder(ozoneConfiguration, ReconSCMDBDefinition.get(), dbFile)
+          .build();
       Table<UUID, DatanodeDetails> nodeTable =
           ReconSCMDBDefinition.NODES.getTable(dbStore);
       Table<UUID, DatanodeDetails> newNodeTable =
@@ -654,23 +654,6 @@ private void initializeNewRdbStore(File dbFile) throws IOException {
     }
   }
 
-  private DBStore createDBAndAddSCMTablesAndCodecs(File dbFile,
-      ReconSCMDBDefinition definition) throws IOException {
-    DBStoreBuilder dbStoreBuilder =
-        DBStoreBuilder.newBuilder(ozoneConfiguration)
-            .setName(dbFile.getName())
-            .setPath(dbFile.toPath().getParent());
-    for (DBColumnFamilyDefinition columnFamily :
-        definition.getColumnFamilies()) {
-      dbStoreBuilder.addTable(columnFamily.getName());
-      dbStoreBuilder.addCodec(columnFamily.getKeyType(),
-          columnFamily.getKeyCodec());
-      dbStoreBuilder.addCodec(columnFamily.getValueType(),
-          columnFamily.getValueCodec());
-    }
-    return dbStoreBuilder.build();
-  }
-
   @Override
   public NodeManager getScmNodeManager() {
     return nodeManager;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
index 4f94250c02..39f2b3b3e7 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
@@ -35,12 +35,10 @@
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
-import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.freon.FreonSubcommand;
 import org.apache.hadoop.ozone.om.OMStorage;
-import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -96,18 +94,8 @@ public Void call() throws Exception {
 
     File metaDir = OMStorage.getOmDbDir(config);
 
-    RocksDBConfiguration rocksDBConfiguration =
-        config.getObject(RocksDBConfiguration.class);
+    omDb = DBStoreBuilder.newBuilder(config, OMDBDefinition.get(), OM_DB_NAME, metaDir.toPath()).build();
 
-    DBStoreBuilder dbStoreBuilder =
-        DBStoreBuilder.newBuilder(config,
-            rocksDBConfiguration)
-            .setName(OM_DB_NAME)
-            .setPath(metaDir.toPath());
-
-    OmMetadataManagerImpl.addOMTablesAndCodecs(dbStoreBuilder);
-
-    omDb = dbStoreBuilder.build();
 
     // initialization: create one bucket and volume in OM.
     writeOmBucketVolume();

