This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 9018728d9d HDDS-10184. Fix ManagedStatistics not closed properly (#6055)
9018728d9d is described below
commit 9018728d9db5fc4728d08e306205f4341c11bc7a
Author: Hongbing Wang <[email protected]>
AuthorDate: Thu Jan 25 15:54:30 2024 +0800
HDDS-10184. Fix ManagedStatistics not closed properly (#6055)
---
.../ozone/container/metadata/AbstractDatanodeStore.java | 15 ---------------
.../org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java | 17 +++++++++--------
.../java/org/apache/hadoop/hdds/utils/db/RDBStore.java | 12 +++++++++---
.../org/apache/hadoop/hdds/utils/db/TestRDBStore.java | 2 +-
.../utils/db/managed/TestRocksObjectLeakDetector.java | 2 ++
5 files changed, 21 insertions(+), 27 deletions(-)
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 50303bd99b..b451071d70 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -32,14 +32,12 @@ import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
import
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile;
import org.rocksdb.InfoLogLevel;
-import org.rocksdb.StatsLevel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -49,9 +47,6 @@ import java.util.NoSuchElementException;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
import static
org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE;
-import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
-import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
-import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
/**
* Implementation of the {@link DatanodeStore} interface that contains
@@ -113,16 +108,6 @@ public abstract class AbstractDatanodeStore implements
DatanodeStore {
options.setMaxTotalWalSize(maxWalSize);
}
- String rocksDbStat = config.getTrimmed(
- OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
- OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
-
- if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
- ManagedStatistics statistics = new ManagedStatistics();
- statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
- options.setStatistics(statistics);
- }
-
DatanodeConfiguration dc =
config.getObject(DatanodeConfiguration.class);
// Config user log files
diff --git
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index fe495e7b06..32fcbfec6e 100644
---
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -83,6 +83,9 @@ public final class DBStoreBuilder {
// The column family options that will be used for any column families
// added by name only (without specifying options).
private ManagedColumnFamilyOptions defaultCfOptions;
+ // Initialize the Statistics instance if ROCKSDB_STATISTICS enabled
+ private ManagedStatistics statistics;
+
private String dbname;
private Path dbPath;
private String dbJmxBeanNameName;
@@ -188,6 +191,11 @@ public final class DBStoreBuilder {
if (maxNumberOfOpenFiles != null) {
dbOptions.setMaxOpenFiles(maxNumberOfOpenFiles);
}
+ if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
+ statistics = new ManagedStatistics();
+ statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
+ dbOptions.setStatistics(statistics);
+ }
}
/**
@@ -217,7 +225,7 @@ public final class DBStoreBuilder {
throw new IOException("The DB destination directory should exist.");
}
- return new RDBStore(dbFile, rocksDBOption, writeOptions, tableConfigs,
+ return new RDBStore(dbFile, rocksDBOption, statistics, writeOptions, tableConfigs,
registry.build(), openReadOnly, maxFSSnapshots, dbJmxBeanNameName,
enableCompactionDag, maxDbUpdatesSizeThreshold, createCheckpointDirs,
configuration, threadNamePrefix);
@@ -413,13 +421,6 @@ public final class DBStoreBuilder {
dbOptions.setWalTtlSeconds(rocksDBConfiguration.getWalTTL());
dbOptions.setWalSizeLimitMB(rocksDBConfiguration.getWalSizeLimit());
- // Create statistics.
- if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
- ManagedStatistics statistics = new ManagedStatistics();
- statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
- dbOptions.setStatistics(statistics);
- }
-
return dbOptions;
}
diff --git
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 47000f8cbc..6760eb47f4 100644
---
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.utils.db.cache.TableCache;
import org.apache.hadoop.hdds.utils.db.RocksDatabase.ColumnFamily;
import org.apache.hadoop.hdds.utils.db.managed.ManagedCompactRangeOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics;
import org.apache.hadoop.hdds.utils.db.managed.ManagedTransactionLogIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions;
import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
@@ -77,10 +78,11 @@ public class RDBStore implements DBStore {
// number in request to avoid increase in heap memory.
private final long maxDbUpdatesSizeThreshold;
private final ManagedDBOptions dbOptions;
+ private final ManagedStatistics statistics;
private final String threadNamePrefix;
@SuppressWarnings("parameternumber")
- public RDBStore(File dbFile, ManagedDBOptions dbOptions,
+ public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics statistics,
ManagedWriteOptions writeOptions, Set<TableConfig> families,
CodecRegistry registry, boolean readOnly, int maxFSSnapshots,
String dbJmxBeanName, boolean enableCompactionDag,
@@ -97,6 +99,7 @@ public class RDBStore implements DBStore {
codecRegistry = registry;
dbLocation = dbFile;
this.dbOptions = dbOptions;
+ this.statistics = statistics;
try {
if (enableCompactionDag) {
@@ -119,8 +122,8 @@ public class RDBStore implements DBStore {
if (dbJmxBeanName == null) {
dbJmxBeanName = dbFile.getName();
}
- metrics = RocksDBStoreMetrics.create(dbOptions.statistics(), db,
- dbJmxBeanName);
+ // Use statistics instead of dbOptions.statistics() to avoid repeated init.
+ metrics = RocksDBStoreMetrics.create(statistics, db, dbJmxBeanName);
if (metrics == null) {
LOG.warn("Metrics registration failed during RocksDB init, " +
"db path :{}", dbJmxBeanName);
@@ -231,6 +234,9 @@ public class RDBStore implements DBStore {
RocksDBCheckpointDifferHolder
.invalidateCacheEntry(rocksDBCheckpointDiffer.getMetadataDir());
}
+ if (statistics != null) {
+ IOUtils.close(LOG, statistics);
+ }
IOUtils.close(LOG, db);
}
diff --git
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index 7724835957..ee589ca8a3 100644
---
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -65,7 +65,7 @@ public class TestRDBStore {
Set<TableConfig> families,
long maxDbUpdatesSizeThreshold)
throws IOException {
- return new RDBStore(dbFile, options, new ManagedWriteOptions(), families,
+ return new RDBStore(dbFile, options, null, new ManagedWriteOptions(), families,
CodecRegistry.newBuilder().build(), false, 1000, null, false,
maxDbUpdatesSizeThreshold, true, null, "");
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java
index 87fbe23ac7..62912ad4aa 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java
@@ -31,6 +31,7 @@ import java.util.UUID;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -49,6 +50,7 @@ public class TestRocksObjectLeakDetector {
static void setUp() throws IOException, InterruptedException,
TimeoutException {
OzoneConfiguration conf = new OzoneConfiguration();
+ conf.set(OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL");
String clusterId = UUID.randomUUID().toString();
String scmId = UUID.randomUUID().toString();
String omServiceId = "omServiceId1";
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]