This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 4fb7054699 HDDS-9283. ManagedReadOptions/ManagedSlice not closed properly in DBScanner (#5510)
4fb7054699 is described below
commit 4fb705469961f7b7e9429fbd6ab677427415a434
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Mon Oct 30 19:29:33 2023 +0100
HDDS-9283. ManagedReadOptions/ManagedSlice not closed properly in DBScanner (#5510)
---
.../hdds/utils/db/managed/ManagedReadOptions.java | 13 +++++++++--
.../utils/db/managed/ManagedRocksObjectUtils.java | 25 +++++++++++++---------
.../hadoop/hdds/utils/db/managed/ManagedSlice.java | 15 +++++++++----
.../org/apache/hadoop/ozone/debug/DBScanner.java | 14 ++++++------
4 files changed, 45 insertions(+), 22 deletions(-)
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java
index 583baa9df5..4b445fa36f 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java
@@ -20,17 +20,26 @@ package org.apache.hadoop.hdds.utils.db.managed;
import org.rocksdb.ReadOptions;
+import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.formatStackTrace;
+
/**
* Managed WriteBatch.
*/
public class ManagedReadOptions extends ReadOptions {
+
+ private final StackTraceElement[] elements;
+
public ManagedReadOptions() {
- super();
+ this.elements = ManagedRocksObjectUtils.getStackTrace();
}
@Override
protected void finalize() throws Throwable {
- ManagedRocksObjectUtils.assertClosed(this);
+ ManagedRocksObjectUtils.assertClosed(this, getStackTrace());
super.finalize();
}
+
+ private String getStackTrace() {
+ return formatStackTrace(elements);
+ }
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
index 3be9171513..6adf147292 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
@@ -26,6 +26,7 @@ import org.rocksdb.RocksObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.time.Duration;
@@ -54,19 +55,23 @@ public final class ManagedRocksObjectUtils {
static void assertClosed(RocksObject rocksObject, String stackTrace) {
ManagedRocksObjectMetrics.INSTANCE.increaseManagedObject();
if (rocksObject.isOwningHandle()) {
- ManagedRocksObjectMetrics.INSTANCE.increaseLeakObject();
- String warning = String.format("%s is not closed properly",
- rocksObject.getClass().getSimpleName());
- if (stackTrace != null && LOG.isDebugEnabled()) {
- String debugMessage = String
- .format("%nStackTrace for unclosed instance: %s", stackTrace);
- warning = warning.concat(debugMessage);
- }
- LOG.warn(warning);
+ reportLeak(rocksObject, stackTrace);
}
}
- static StackTraceElement[] getStackTrace() {
+ static void reportLeak(Object object, String stackTrace) {
+ ManagedRocksObjectMetrics.INSTANCE.increaseLeakObject();
+ String warning = String.format("%s is not closed properly",
+ object.getClass().getSimpleName());
+ if (stackTrace != null && LOG.isDebugEnabled()) {
+ String debugMessage = String
+ .format("%nStackTrace for unclosed instance: %s", stackTrace);
+ warning = warning.concat(debugMessage);
+ }
+ LOG.warn(warning);
+ }
+
+ static @Nullable StackTraceElement[] getStackTrace() {
return HddsUtils.getStackTrace(LOG);
}
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
index 334fb399f5..19d702253e 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java
@@ -20,13 +20,18 @@ package org.apache.hadoop.hdds.utils.db.managed;
import org.rocksdb.Slice;
+import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.formatStackTrace;
+
/**
* Managed Slice.
*/
public class ManagedSlice extends Slice {
+ private final StackTraceElement[] elements;
+
public ManagedSlice(byte[] var1) {
super(var1);
+ this.elements = ManagedRocksObjectUtils.getStackTrace();
}
@Override
@@ -37,11 +42,13 @@ public class ManagedSlice extends Slice {
@Override
protected void finalize() throws Throwable {
ManagedRocksObjectMetrics.INSTANCE.increaseManagedObject();
- if (this.isOwningHandle()) {
- ManagedRocksObjectMetrics.INSTANCE.increaseLeakObject();
- ManagedRocksObjectUtils.LOG.warn("{} is not closed properly",
- this.getClass().getSimpleName());
+ if (isOwningHandle()) {
+ ManagedRocksObjectUtils.reportLeak(this, getStackTrace());
}
super.finalize();
}
+
+ private String getStackTrace() {
+ return formatStackTrace(elements);
+ }
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
index 6f455feba5..229670fcff 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.cli.SubcommandWithParent;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
import org.apache.hadoop.hdds.utils.db.DBDefinition;
import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec;
@@ -369,13 +370,16 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
}
ManagedRocksIterator iterator = null;
+ ManagedReadOptions readOptions = null;
+ ManagedSlice slice = null;
try {
if (containerId > 0L && schemaV3) {
// Handle SchemaV3 DN DB
- ManagedReadOptions readOptions = new ManagedReadOptions();
- readOptions.setIterateUpperBound(new ManagedSlice(
+ readOptions = new ManagedReadOptions();
+ slice = new ManagedSlice(
DatanodeSchemaThreeDBDefinition.getContainerKeyPrefixBytes(
- containerId + 1L)));
+ containerId + 1L));
+ readOptions.setIterateUpperBound(slice);
iterator = new ManagedRocksIterator(
rocksDB.get().newIterator(columnFamilyHandle, readOptions));
iterator.get().seek(
@@ -389,9 +393,7 @@ public class DBScanner implements Callable<Void>, SubcommandWithParent {
return displayTable(iterator, columnFamilyDefinition, schemaV3);
} finally {
- if (iterator != null) {
- iterator.close();
- }
+ IOUtils.closeQuietly(iterator, readOptions, slice);
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]