This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 658e139b45 HDDS-7116. Avoid leaking RocksObject from DBProfile (#3673)
658e139b45 is described below

commit 658e139b45ffc276a4eeb0e601ebf7032debbf65
Author: Duong Nguyen <[email protected]>
AuthorDate: Thu Aug 11 11:18:53 2022 -0700

    HDDS-7116. Avoid leaking RocksObject from DBProfile (#3673)
---
 .../common/utils/db/DatanodeDBProfile.java         | 21 +++---
 .../container/metadata/AbstractDatanodeStore.java  |  1 +
 .../container/keyvalue/TestKeyValueContainer.java  | 36 -----------
 .../org/apache/hadoop/hdds/utils/db/DBProfile.java | 19 +++---
 .../hadoop/hdds/utils/db/DBStoreBuilder.java       | 50 +++++++++------
 .../org/apache/hadoop/hdds/utils/db/RDBStore.java  |  2 +-
 .../apache/hadoop/hdds/utils/db/RocksDatabase.java | 37 +++++------
 .../apache/hadoop/hdds/utils/db/TableConfig.java   | 14 +++-
 .../db/managed/ManagedBlockBasedTableConfig.java   | 74 ++++++++++++++++++++++
 .../db/managed/ManagedColumnFamilyOptions.java     | 35 ++++++++++
 pom.xml                                            |  1 -
 11 files changed, 186 insertions(+), 104 deletions(-)
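
The leak addressed here is the usual RocksDB JNI pattern: classes such as
ColumnFamilyOptions, LRUCache and BloomFilter are RocksObjects backed by
native memory, so any instance that is created but never close()d leaks
off-heap memory. A minimal illustrative sketch of the ownership rule the
patch moves toward (plain org.rocksdb types, hypothetical values, not code
from the patch):

    import org.rocksdb.BlockBasedTableConfig;
    import org.rocksdb.ColumnFamilyOptions;
    import org.rocksdb.LRUCache;

    public final class RocksObjectOwnershipSketch {
      public static void main(String[] args) {
        // try-with-resources releases the native handles even on error paths.
        try (LRUCache cache = new LRUCache(256L << 20);
             ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) {
          cfOptions.setTableFormatConfig(
              new BlockBasedTableConfig().setBlockCache(cache));
          // ... open and use the DB with cfOptions, closing the DB first ...
        }
      }
    }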

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/DatanodeDBProfile.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/DatanodeDBProfile.java
index 65dde79663..e04c7254e8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/DatanodeDBProfile.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/db/DatanodeDBProfile.java
@@ -21,12 +21,10 @@ package org.apache.hadoop.ozone.container.common.utils.db;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.utils.db.DBProfile;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedBlockBasedTableConfig;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedLRUCache;
-import org.rocksdb.BlockBasedTableConfig;
-
-import java.util.concurrent.atomic.AtomicReference;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE_DEFAULT;
@@ -106,8 +104,6 @@ public abstract class DatanodeDBProfile {
    * Base profile for datanode storage disks.
    */
   private static final class StorageBasedProfile {
-    private final AtomicReference<ManagedColumnFamilyOptions> cfOptions =
-        new AtomicReference<>();
     private final DBProfile baseProfile;
 
     private StorageBasedProfile(DBProfile profile) {
@@ -120,15 +116,15 @@ public abstract class DatanodeDBProfile {
 
     private ManagedColumnFamilyOptions getColumnFamilyOptions(
         ConfigurationSource config) {
-      cfOptions.updateAndGet(op -> op != null ? op :
-          baseProfile.getColumnFamilyOptions()
-              .setTableFormatConfig(getBlockBasedTableConfig(config)));
-      return cfOptions.get();
+      ManagedColumnFamilyOptions cfOptions =
+          baseProfile.getColumnFamilyOptions();
+      return cfOptions.closeAndSetTableFormatConfig(
+          getBlockBasedTableConfig(config));
     }
 
-    private BlockBasedTableConfig getBlockBasedTableConfig(
+    private ManagedBlockBasedTableConfig getBlockBasedTableConfig(
         ConfigurationSource config) {
-      BlockBasedTableConfig blockBasedTableConfig =
+      ManagedBlockBasedTableConfig blockBasedTableConfig =
           baseProfile.getBlockBasedTableConfig();
       if (config == null) {
         return blockBasedTableConfig;
@@ -138,7 +134,8 @@ public abstract class DatanodeDBProfile {
           .getStorageSize(HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE,
               HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE_DEFAULT,
               StorageUnit.BYTES);
-      blockBasedTableConfig.setBlockCache(new ManagedLRUCache(cacheSize));
+      blockBasedTableConfig.closeAndSetBlockCache(
+          new ManagedLRUCache(cacheSize));
       return blockBasedTableConfig;
     }
   }
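
With the cached AtomicReference gone, each call to
DatanodeDBProfile#getColumnFamilyOptions builds a fresh options object that
the caller owns and must close (AbstractDatanodeStore does this in its
close() method below). Caller-side usage now looks roughly like this
(illustrative sketch, not code from the patch):

    DatanodeDBProfile profile = new DatanodeDBProfile.Disk();
    ManagedColumnFamilyOptions cfOptions =
        profile.getColumnFamilyOptions(new OzoneConfiguration());
    try {
      // ... open the datanode RocksDB store with cfOptions ...
    } finally {
      // Releases the options together with its ManagedBlockBasedTableConfig.
      ManagedColumnFamilyOptions.closeDeeply(cfOptions);
    }
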
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 9a19faf4d3..79a55ef99d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -208,6 +208,7 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
   @Override
   public void close() throws IOException {
     this.store.close();
+    this.cfOptions.close();
   }
 
   @Override
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index f91f0ac3e6..366d75af5b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.container.keyvalue;
 
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
@@ -58,7 +57,6 @@ import org.junit.rules.TemporaryFolder;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.mockito.Mockito;
-import org.rocksdb.ColumnFamilyOptions;
 
 import java.io.File;
 
@@ -73,7 +71,6 @@ import java.util.Map;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import java.util.stream.Stream;
@@ -533,39 +530,6 @@ public class TestKeyValueContainer {
     }
   }
 
-  @Test
-  public void testContainersShareColumnFamilyOptions() {
-    ConfigurationSource conf = new OzoneConfiguration();
-
-    // Make sure ColumnFamilyOptions are same for a particular db profile
-    for (Supplier<DatanodeDBProfile> dbProfileSupplier : new Supplier[] {
-        DatanodeDBProfile.Disk::new, DatanodeDBProfile.SSD::new }) {
-      // ColumnFamilyOptions should be same across configurations
-      ColumnFamilyOptions columnFamilyOptions1 = dbProfileSupplier.get()
-          .getColumnFamilyOptions(new OzoneConfiguration());
-      ColumnFamilyOptions columnFamilyOptions2 = dbProfileSupplier.get()
-          .getColumnFamilyOptions(new OzoneConfiguration());
-      Assert.assertEquals(columnFamilyOptions1, columnFamilyOptions2);
-
-      // ColumnFamilyOptions should be same when queried multiple times
-      // for a particulat configuration
-      columnFamilyOptions1 = dbProfileSupplier.get()
-          .getColumnFamilyOptions(conf);
-      columnFamilyOptions2 = dbProfileSupplier.get()
-          .getColumnFamilyOptions(conf);
-      Assert.assertEquals(columnFamilyOptions1, columnFamilyOptions2);
-    }
-
-    // Make sure ColumnFamilyOptions are different for different db profile
-    DatanodeDBProfile diskProfile = new DatanodeDBProfile.Disk();
-    DatanodeDBProfile ssdProfile = new DatanodeDBProfile.SSD();
-    Assert.assertNotEquals(
-        diskProfile.getColumnFamilyOptions(new OzoneConfiguration()),
-        ssdProfile.getColumnFamilyOptions(new OzoneConfiguration()));
-    Assert.assertNotEquals(diskProfile.getColumnFamilyOptions(conf),
-        ssdProfile.getColumnFamilyOptions(conf));
-  }
-
   @Test
   public void testDBProfileAffectsDBOptions() throws Exception {
     // Create Container 1
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java
index 5fe98b9c11..b3dbb857ab 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java
@@ -20,11 +20,11 @@
 package org.apache.hadoop.hdds.utils.db;
 
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedBlockBasedTableConfig;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedBloomFilter;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedLRUCache;
-import org.rocksdb.BlockBasedTableConfig;
 import org.rocksdb.CompactionStyle;
 
 import java.math.BigDecimal;
@@ -80,18 +80,19 @@ public enum DBProfile {
     }
 
     @Override
-    public BlockBasedTableConfig getBlockBasedTableConfig() {
+    public ManagedBlockBasedTableConfig getBlockBasedTableConfig() {
       // Set BlockCacheSize to 256 MB. This should not be an issue for HADOOP.
       final long blockCacheSize = toLong(StorageUnit.MB.toBytes(256.00));
 
       // Set the Default block size to 16KB
       final long blockSize = toLong(StorageUnit.KB.toBytes(16));
 
-      return new BlockBasedTableConfig()
-          .setBlockCache(new ManagedLRUCache(blockCacheSize))
-          .setBlockSize(blockSize)
-          .setPinL0FilterAndIndexBlocksInCache(true)
-          .setFilterPolicy(new ManagedBloomFilter());
+      ManagedBlockBasedTableConfig config = new ManagedBlockBasedTableConfig();
+      config.setBlockCache(new ManagedLRUCache(blockCacheSize))
+            .setBlockSize(blockSize)
+            .setPinL0FilterAndIndexBlocksInCache(true)
+            .setFilterPolicy(new ManagedBloomFilter());
+      return config;
     }
 
   },
@@ -117,7 +118,7 @@ public enum DBProfile {
     }
 
     @Override
-    public BlockBasedTableConfig getBlockBasedTableConfig() {
+    public ManagedBlockBasedTableConfig getBlockBasedTableConfig() {
       return SSD.getBlockBasedTableConfig();
     }
   };
@@ -131,5 +132,5 @@ public enum DBProfile {
 
   public abstract ManagedColumnFamilyOptions getColumnFamilyOptions();
 
-  public abstract BlockBasedTableConfig getBlockBasedTableConfig();
+  public abstract ManagedBlockBasedTableConfig getBlockBasedTableConfig();
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 6bd2881bbc..301f06bb0f 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -29,6 +29,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 
 import org.apache.hadoop.hdds.HddsConfigKeys;
@@ -71,8 +72,6 @@ public final class DBStoreBuilder {
   // DB PKIProfile used by ROCKDB instances.
   public static final DBProfile HDDS_DEFAULT_DB_PROFILE = DBProfile.DISK;
 
-  // the DBOptions used if the caller does not specify.
-  private final ManagedDBOptions defaultDBOptions;
   // The DBOptions specified by the caller.
   private ManagedDBOptions rocksDBOption;
   // The column family options that will be used for any column families
@@ -90,6 +89,7 @@ public final class DBStoreBuilder {
   private RocksDBConfiguration rocksDBConfiguration;
   // Flag to indicate if the RocksDB should be opened readonly.
   private boolean openReadOnly = false;
+  private final DBProfile defaultCfProfile;
 
   /**
    * Create DBStoreBuilder from a generic DBDefinition.
@@ -130,12 +130,9 @@ public final class DBStoreBuilder {
 
     // Get default DBOptions and ColumnFamilyOptions from the default DB
     // profile.
-    DBProfile dbProfile = this.configuration.getEnum(HDDS_DB_PROFILE,
+    defaultCfProfile = this.configuration.getEnum(HDDS_DB_PROFILE,
           HDDS_DEFAULT_DB_PROFILE);
-    LOG.debug("Default DB profile:{}", dbProfile);
-
-    defaultDBOptions = dbProfile.getDBOptions();
-    setDefaultCFOptions(dbProfile.getColumnFamilyOptions());
+    LOG.debug("Default DB profile:{}", defaultCfProfile);
   }
 
   private void applyDBDefinition(DBDefinition definition) {
@@ -177,20 +174,24 @@ public final class DBStoreBuilder {
 
     Set<TableConfig> tableConfigs = makeTableConfigs();
 
-    if (rocksDBOption == null) {
-      rocksDBOption = getDefaultDBOptions(tableConfigs);
-    }
+    try {
+      if (rocksDBOption == null) {
+        rocksDBOption = getDefaultDBOptions(tableConfigs);
+      }
 
-    ManagedWriteOptions writeOptions = new ManagedWriteOptions();
-    writeOptions.setSync(rocksDBConfiguration.getSyncOption());
+      ManagedWriteOptions writeOptions = new ManagedWriteOptions();
+      writeOptions.setSync(rocksDBConfiguration.getSyncOption());
 
-    File dbFile = getDBFile();
-    if (!dbFile.getParentFile().exists()) {
-      throw new IOException("The DB destination directory should exist.");
-    }
+      File dbFile = getDBFile();
+      if (!dbFile.getParentFile().exists()) {
+        throw new IOException("The DB destination directory should exist.");
+      }
 
-    return new RDBStore(dbFile, rocksDBOption, writeOptions, tableConfigs,
-        registry, openReadOnly);
+      return new RDBStore(dbFile, rocksDBOption, writeOptions, tableConfigs,
+          registry, openReadOnly);
+    } finally {
+      tableConfigs.forEach(TableConfig::close);
+    }
   }
 
   public DBStoreBuilder setName(String name) {
@@ -256,7 +257,7 @@ public final class DBStoreBuilder {
 
     // If default column family was not added, add it with the default options.
     cfOptions.putIfAbsent(DEFAULT_COLUMN_FAMILY_NAME,
-        defaultCfOptions);
+        getDefaultCfOptions());
 
     for (Map.Entry<String, ManagedColumnFamilyOptions> entry:
         cfOptions.entrySet()) {
@@ -265,7 +266,7 @@ public final class DBStoreBuilder {
 
       if (options == null) {
         LOG.debug("using default column family options for table: {}", name);
-        tableConfigs.add(new TableConfig(name, defaultCfOptions));
+        tableConfigs.add(new TableConfig(name, getDefaultCfOptions()));
       } else {
         tableConfigs.add(new TableConfig(name, options));
       }
@@ -274,6 +275,11 @@ public final class DBStoreBuilder {
     return tableConfigs;
   }
 
+  private ManagedColumnFamilyOptions getDefaultCfOptions() {
+    return Optional.ofNullable(defaultCfOptions)
+        .orElseGet(defaultCfProfile::getColumnFamilyOptions);
+  }
+
   /**
    * Attempts to get RocksDB {@link ManagedDBOptions} from an ini config
    * file. If that file does not exist, the value of {@code defaultDBOptions}
@@ -294,7 +300,7 @@ public final class DBStoreBuilder {
     ManagedDBOptions dbOptions = getDBOptionsFromFile(tableConfigs);
 
     if (dbOptions == null) {
-      dbOptions = defaultDBOptions;
+      dbOptions = defaultCfProfile.getDBOptions();
       LOG.debug("Using RocksDB DBOptions from default profile.");
     }
 
@@ -348,6 +354,8 @@ public final class DBStoreBuilder {
           }
         } catch (IOException ex) {
           LOG.info("Unable to read RocksDB DBOptions from {}", dbname, ex);
+        } finally {
+          columnFamilyDescriptors.forEach(d -> d.getOptions().close());
         }
       }
     }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 9618f66cd7..c18354168b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -263,7 +263,7 @@ public class RDBStore implements DBStore {
   }
 
   public Collection<ColumnFamily> getColumnFamilies() {
-    return db.getColumnFamilies();
+    return db.getExtraColumnFamilies();
   }
 
   @Override
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index 730fc6aa62..9a710ec4ea 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -47,12 +47,12 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.function.Predicate;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 import static org.apache.hadoop.hdds.StringUtils.bytes2String;
+import static org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions.closeDeeply;
 import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator.managed;
 import static org.apache.hadoop.hdds.utils.db.managed.ManagedTransactionLogIterator.managed;
 import static org.rocksdb.RocksDB.listColumnFamilies;
@@ -77,11 +77,20 @@ public final class RocksDatabase {
    *
    * @return a list of column families.
    */
-  private static List<TableConfig> getColumnFamilies(File file)
-      throws RocksDBException {
+  private static List<TableConfig> getExtraColumnFamilies(
+      File file, Set<TableConfig> families) throws RocksDBException {
+
+    // This logic has been added to support old column families that have
+    // been removed, or those that may have been created in a future version.
+    // TODO : Revisit this logic during upgrade implementation.
+    Set<String> existingFamilyNames = families.stream()
+        .map(TableConfig::getName)
+        .collect(Collectors.toSet());
     final List<TableConfig> columnFamilies = listColumnFamiliesEmptyOptions(
         file.getAbsolutePath())
         .stream()
+        .map(TableConfig::toName)
+        .filter(familyName -> !existingFamilyNames.contains(familyName))
         .map(TableConfig::newTableConfig)
         .collect(Collectors.toList());
     if (LOG.isDebugEnabled()) {
@@ -112,12 +121,8 @@ public final class RocksDatabase {
     ManagedRocksDB db = null;
     final Map<String, ColumnFamily> columnFamilies = new HashMap<>();
     try {
-      // This logic has been added to support old column families that have
-      // been removed, or those that may have been created in a future version.
-      // TODO : Revisit this logic during upgrade implementation.
-      final Stream<TableConfig> extra = getColumnFamilies(dbFile).stream()
-          .filter(extraColumnFamily(families));
-      descriptors = Stream.concat(families.stream(), extra)
+      final List<TableConfig> extra = getExtraColumnFamilies(dbFile, families);
+      descriptors = Stream.concat(families.stream(), extra.stream())
           .map(TableConfig::getDescriptor)
           .collect(Collectors.toList());
 
@@ -144,7 +149,7 @@ public final class RocksDatabase {
   }
 
   private static void close(ColumnFamilyDescriptor d) {
-    runWithTryCatch(() -> d.getOptions().close(), new Object() {
+    runWithTryCatch(() -> closeDeeply(d.getOptions()), new Object() {
       @Override
       public String toString() {
         return d.getClass() + ":" + bytes2String(d.getName());
@@ -185,16 +190,6 @@ public final class RocksDatabase {
     }
   }
 
-  static Predicate<TableConfig> extraColumnFamily(Set<TableConfig> families) {
-    return f -> {
-      if (families.contains(f)) {
-        return false;
-      }
-      LOG.info("Found an extra column family in existing DB: {}", f);
-      return true;
-    };
-  }
-
   public boolean isClosed() {
     return isClosed.get();
   }
@@ -405,7 +400,7 @@ public final class RocksDatabase {
     return columnFamilies.get(key);
   }
 
-  public Collection<ColumnFamily> getColumnFamilies() {
+  public Collection<ColumnFamily> getExtraColumnFamilies() {
     return Collections.unmodifiableCollection(columnFamilies.values());
   }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
index 30c91cbecb..3a3b5a3c3a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
@@ -29,15 +29,18 @@ import org.rocksdb.ColumnFamilyDescriptor;
 /**
  * Class that maintains Table Configuration.
  */
-public class TableConfig {
-  static TableConfig newTableConfig(byte[] bytes) {
-    return new TableConfig(StringUtils.bytes2String(bytes),
+public class TableConfig implements AutoCloseable {
+  static TableConfig newTableConfig(String name) {
+    return new TableConfig(name,
         DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE.getColumnFamilyOptions());
   }
 
   private final String name;
   private final ManagedColumnFamilyOptions columnFamilyOptions;
 
+  public static String toName(byte[] bytes) {
+    return StringUtils.bytes2String(bytes);
+  }
 
   /**
    * Constructs a Table Config.
@@ -102,4 +105,9 @@ public class TableConfig {
   public String toString() {
     return getName();
   }
+
+  @Override
+  public void close() {
+    columnFamilyOptions.close();
+  }
 }
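
Since TableConfig now owns its ManagedColumnFamilyOptions, it can be used
with try-with-resources; a hypothetical sketch (not code from the patch):

    try (TableConfig tableConfig = new TableConfig("table1",
        DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE.getColumnFamilyOptions())) {
      // Use the descriptor while the underlying options are still open.
      ColumnFamilyDescriptor descriptor = tableConfig.getDescriptor();
      // ... pass descriptor to the RocksDB open call ...
    } // close() releases the ManagedColumnFamilyOptions
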
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBlockBasedTableConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBlockBasedTableConfig.java
new file mode 100644
index 0000000000..f3e796d130
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBlockBasedTableConfig.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.hdds.utils.db.managed;
+
+import org.rocksdb.BlockBasedTableConfig;
+import org.rocksdb.Cache;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Managed BlockBasedTableConfig.
+ */
+public class ManagedBlockBasedTableConfig extends BlockBasedTableConfig {
+  private Cache blockCacheHolder;
+  private AtomicBoolean closed = new AtomicBoolean(false);
+
+  public synchronized ManagedBlockBasedTableConfig closeAndSetBlockCache(
+      Cache blockCache) {
+    Cache previous = blockCacheHolder;
+    if (previous.isOwningHandle()) {
+      previous.close();
+    }
+    return setBlockCache(blockCache);
+  }
+
+  @Override
+  public synchronized ManagedBlockBasedTableConfig setBlockCache(
+      Cache blockCache) {
+    // Close the previous Cache before overwriting.
+    Cache previous = blockCacheHolder;
+    if (previous != null && previous.isOwningHandle()) {
+      throw new IllegalStateException("Overriding an unclosed value.");
+    }
+
+    blockCacheHolder = blockCache;
+    super.setBlockCache(blockCache);
+    return this;
+  }
+
+  public boolean isClosed() {
+    return closed.get();
+  }
+
+  /**
+   * Close children resources.
+   * See org.apache.hadoop.hdds.utils.db.DBProfile.getBlockBasedTableConfig
+   */
+  public void close() {
+    if (closed.compareAndSet(false, true)) {
+      if (filterPolicy() != null) {
+        filterPolicy().close();
+      }
+      if (blockCacheHolder != null) {
+        blockCacheHolder.close();
+      }
+    }
+  }
+}
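
The intended lifecycle of the new managed config, roughly (illustrative
values only, not code from the patch):

    ManagedBlockBasedTableConfig config = new ManagedBlockBasedTableConfig();
    config.setBlockCache(new ManagedLRUCache(256L << 20))
        .setBlockSize(16L << 10)
        .setFilterPolicy(new ManagedBloomFilter());

    // Replacing the cache later must go through closeAndSetBlockCache so the
    // previously held native cache handle is released first; a plain
    // setBlockCache over an unclosed cache throws IllegalStateException.
    config.closeAndSetBlockCache(new ManagedLRUCache(128L << 20));

    // Once no open ColumnFamilyOptions references the config any more:
    config.close();   // closes the filter policy and the held block cache
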
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
index dcc3d33f2e..588478ab3a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java
@@ -36,13 +36,48 @@ public class ManagedColumnFamilyOptions extends ColumnFamilyOptions {
   @Override
   public ManagedColumnFamilyOptions setTableFormatConfig(
       TableFormatConfig tableFormatConfig) {
+    TableFormatConfig previous = tableFormatConfig();
+    if (previous instanceof ManagedBlockBasedTableConfig) {
+      if (!((ManagedBlockBasedTableConfig) previous).isClosed()) {
+        throw new IllegalStateException("Overriding an unclosed value.");
+      }
+    } else if (previous != null) {
+      throw new UnsupportedOperationException("Overwrite is not supported for "
+          + previous.getClass());
+    }
+
     super.setTableFormatConfig(tableFormatConfig);
     return this;
   }
 
+  public synchronized ManagedColumnFamilyOptions closeAndSetTableFormatConfig(
+      TableFormatConfig tableFormatConfig) {
+    TableFormatConfig previous = tableFormatConfig();
+    if (previous instanceof ManagedBlockBasedTableConfig) {
+      ((ManagedBlockBasedTableConfig) previous).close();
+    }
+    setTableFormatConfig(tableFormatConfig);
+    return this;
+  }
+
+
   @Override
   protected void finalize() throws Throwable {
     ManagedRocksObjectUtils.assertClosed(this);
     super.finalize();
   }
+
+  /**
+   * Close ColumnFamilyOptions and its child resources.
+   * See org.apache.hadoop.hdds.utils.db.DBProfile.getColumnFamilyOptions
+   *
+   * @param options
+   */
+  public static void closeDeeply(ColumnFamilyOptions options) {
+    TableFormatConfig tableFormatConfig = options.tableFormatConfig();
+    if (tableFormatConfig instanceof ManagedBlockBasedTableConfig) {
+      ((ManagedBlockBasedTableConfig) tableFormatConfig).close();
+    }
+    options.close();
+  }
 }
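
closeDeeply is what RocksDatabase now uses to drop a column family
descriptor's options together with their table format config; standalone
usage would look roughly like this (illustrative sketch):

    ManagedColumnFamilyOptions cfOptions =
        DBProfile.DISK.getColumnFamilyOptions();
    try {
      // ... build column family descriptors / open the DB ...
    } finally {
      // Closes any ManagedBlockBasedTableConfig children first, then the
      // options object itself.
      ManagedColumnFamilyOptions.closeDeeply(cfOptions);
    }
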
diff --git a/pom.xml b/pom.xml
index e767e77b87..6554c5af90 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1850,7 +1850,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
                     <bannedImport>org.rocksdb.**</bannedImport>
                     <allowedImports>
                       <!-- Allow non-RocksObject classes. -->
-                      <allowedImport>org.rocksdb.BlockBasedTableConfig</allowedImport>
                       <allowedImport>org.rocksdb.ColumnFamilyDescriptor</allowedImport>
                       <allowedImport>org.rocksdb.CompactionStyle</allowedImport>
                       <allowedImport>org.rocksdb.HistogramData</allowedImport>


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
