This is an automated email from the ASF dual-hosted git repository.
swamirishi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 13fd2c6028a HDDS-14162. Fix Native Jni Lib to read SST files using CodecBuffer (#9489)
13fd2c6028a is described below
commit 13fd2c6028a63d55f211bff5c3e22355dac9dcdd
Author: Swaminathan Balachandran <[email protected]>
AuthorDate: Sun Dec 21 08:38:47 2025 -0500
HDDS-14162. Fix Native Jni Lib to read SST files using CodecBuffer (#9489)
---
.../org/apache/hadoop/hdds/utils/db/Buffer.java | 80 +++++++++++++++++++
.../hdds/utils/db/RDBStoreCodecBufferIterator.java | 60 --------------
.../hadoop/hdds/utils/db/TestCodecBufferCodec.java | 17 ++++
.../{managed => }/ManagedRawSSTFileIterator.java | 57 +++++++++-----
.../db/{managed => }/ManagedRawSSTFileReader.java | 15 ++--
.../hdds/utils/db/{managed => }/package-info.java | 4 +-
.../src/main/native/ManagedRawSSTFileIterator.cpp | 92 ++++++++++++----------
.../src/main/native/ManagedRawSSTFileReader.cpp | 8 +-
.../hadoop/hdds/utils/TestNativeLibraryLoader.java | 2 +-
.../TestManagedRawSSTFileIterator.java | 22 ++++--
.../hdds/utils/db/ManagedSstFileIterator.java | 90 +++++++++++++++++++++
.../hdds/utils/db}/SstFileSetReader.java | 62 +++------------
.../ozone/rocksdiff/RocksDBCheckpointDiffer.java | 31 +++-----
.../hadoop/hdds/utils/db/TestRDBSstFileWriter.java | 10 +--
.../hdds/utils/db}/TestSstFileSetReader.java | 8 +-
.../apache/ozone/rocksdiff/TestCompactionDag.java | 2 +-
.../rocksdiff/TestRocksDBCheckpointDiffer.java | 69 ++++++++--------
.../hadoop/ozone/om/snapshot/TestOmSnapshot.java | 13 +--
.../ozone/om/snapshot/SnapshotDiffManager.java | 4 +-
.../om/snapshot/defrag/SnapshotDefragService.java | 4 +-
.../ozone/om/snapshot/TestSnapshotDiffManager.java | 2 +-
.../snapshot/defrag/TestSnapshotDefragService.java | 2 +-
22 files changed, 385 insertions(+), 269 deletions(-)
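
For orientation, a minimal sketch of how a caller can drive the relocated reader and iterator after this change. The file path is hypothetical, error handling is elided, and the native library must already be loaded (the tests below call ManagedRawSSTFileReader.tryLoadLibrary() first); keys now come back as CodecBuffers rather than byte[]:

    try (ManagedOptions options = new ManagedOptions();
         ManagedRawSSTFileReader reader =
             new ManagedRawSSTFileReader(options, "/tmp/example.sst", 2 * 1024 * 1024);
         // transformer decodes each key CodecBuffer into a String
         ManagedRawSSTFileIterator<String> itr = reader.newIterator(
             kv -> StringCodec.get().fromCodecBuffer(kv.getKey()), null, null, IteratorType.KEY_ONLY)) {
      while (itr.hasNext()) {
        System.out.println(itr.next());
      }
    }
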
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Buffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Buffer.java
new file mode 100644
index 00000000000..77df1fb9a44
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Buffer.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import org.apache.ratis.util.Preconditions;
+
+class Buffer {
+ private final CodecBuffer.Capacity initialCapacity;
+ private final PutToByteBuffer<RuntimeException> source;
+ private CodecBuffer buffer;
+
+ Buffer(CodecBuffer.Capacity initialCapacity,
+ PutToByteBuffer<RuntimeException> source) {
+ this.initialCapacity = initialCapacity;
+ this.source = source;
+ }
+
+ void release() {
+ if (buffer != null) {
+ buffer.release();
+ }
+ }
+
+ private void prepare() {
+ if (buffer == null) {
+ allocate();
+ } else {
+ buffer.clear();
+ }
+ }
+
+ private void allocate() {
+ if (buffer != null) {
+ buffer.release();
+ }
+ buffer = CodecBuffer.allocateDirect(-initialCapacity.get());
+ }
+
+ CodecBuffer getFromDb() {
+ if (source == null) {
+ return null;
+ }
+
+ for (prepare(); ; allocate()) {
+ final Integer required = buffer.putFromSource(source);
+ if (required == null) {
+ return null; // the source is unavailable
+ } else if (required == buffer.readableBytes()) {
+ return buffer; // buffer size is big enough
+ }
+ // buffer size too small, try increasing the capacity.
+ if (buffer.setCapacity(required)) {
+ buffer.clear();
+ // retry with the new capacity
+ final int retried = buffer.putFromSource(source);
+ Preconditions.assertSame(required.intValue(), retried, "required");
+ return buffer;
+ }
+
+ // failed to increase the capacity
+ // increase initial capacity and reallocate it
+ initialCapacity.increase(required);
+ }
+ }
+}
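
The getFromDb() loop above is what lets the small initial allocations used by the iterators grow on demand: the source callback reports the total number of bytes it needs, and the CodecBuffer is resized and the read retried when that exceeds the current capacity. A hedged sketch of how an iterator wires a JNI read into it (handle and getKey stand in for the iterator's native handle and native method shown later in this patch):

    Buffer keyBuffer = new Buffer(
        new CodecBuffer.Capacity("example iterator-key", 1 << 10),
        byteBuffer -> getKey(handle, byteBuffer, byteBuffer.position(), byteBuffer.remaining()));
    CodecBuffer key = keyBuffer.getFromDb();  // grows past 1 KB and retries if the key is larger
    // ... use key; it is the Buffer's internal CodecBuffer, so call release() only when finished:
    keyBuffer.release();
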
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
index 4f4f959938e..aa703249ebe 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
@@ -97,64 +97,4 @@ public void close() {
valueBuffer.release();
}
}
-
- static class Buffer {
- private final CodecBuffer.Capacity initialCapacity;
- private final PutToByteBuffer<RuntimeException> source;
- private CodecBuffer buffer;
-
- Buffer(CodecBuffer.Capacity initialCapacity,
- PutToByteBuffer<RuntimeException> source) {
- this.initialCapacity = initialCapacity;
- this.source = source;
- }
-
- void release() {
- if (buffer != null) {
- buffer.release();
- }
- }
-
- private void prepare() {
- if (buffer == null) {
- allocate();
- } else {
- buffer.clear();
- }
- }
-
- private void allocate() {
- if (buffer != null) {
- buffer.release();
- }
- buffer = CodecBuffer.allocateDirect(-initialCapacity.get());
- }
-
- CodecBuffer getFromDb() {
- if (source == null) {
- return null;
- }
-
- for (prepare(); ; allocate()) {
- final Integer required = buffer.putFromSource(source);
- if (required == null) {
- return null; // the source is unavailable
- } else if (required == buffer.readableBytes()) {
- return buffer; // buffer size is big enough
- }
- // buffer size too small, try increasing the capacity.
- if (buffer.setCapacity(required)) {
- buffer.clear();
- // retry with the new capacity
- final int retried = buffer.putFromSource(source);
- Preconditions.assertSame(required.intValue(), retried, "required");
- return buffer;
- }
-
- // failed to increase the capacity
- // increase initial capacity and reallocate it
- initialCapacity.increase(required);
- }
- }
- }
}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestCodecBufferCodec.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestCodecBufferCodec.java
index 03a2bc4f4ba..e147bba9370 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestCodecBufferCodec.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestCodecBufferCodec.java
@@ -17,10 +17,12 @@
package org.apache.hadoop.hdds.utils.db;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;
import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
import org.junit.jupiter.params.provider.ValueSource;
/**
@@ -50,4 +52,19 @@ public void testFromPersistedFormat(boolean direct) throws CodecException {
assertEquals(testString, value);
}
}
+
+ @ParameterizedTest
+  @CsvSource(value = {"0,true", "0,false", "1,true", "1,false", "10,true", "10,false"})
+  public void testCodecBufferAllocateByteArray(int length, boolean direct) throws CodecException {
+ byte[] arr = new byte[length];
+ Codec<CodecBuffer> codec = CodecBufferCodec.get(direct);
+ for (int i = 0; i < length; i++) {
+ arr[i] = (byte)i;
+ }
+ try (CodecBuffer codecBuffer = codec.fromPersistedFormat(arr)) {
+ assertEquals(length, codecBuffer.asReadOnlyByteBuffer().remaining());
+      assertEquals(direct || length == 0, codecBuffer.asReadOnlyByteBuffer().isDirect());
+ assertArrayEquals(arr, codecBuffer.getArray());
+ }
+ }
}
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileIterator.java
similarity index 61%
rename from hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java
rename to hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileIterator.java
index 4c66ca5cb43..134f24942da 100644
--- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java
+++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileIterator.java
@@ -15,14 +15,12 @@
* limitations under the License.
*/
-package org.apache.hadoop.hdds.utils.db.managed;
+package org.apache.hadoop.hdds.utils.db;
import com.google.common.primitives.UnsignedLong;
-import java.util.Arrays;
+import java.nio.ByteBuffer;
import java.util.NoSuchElementException;
import java.util.function.Function;
-import org.apache.hadoop.hdds.StringUtils;
-import org.apache.hadoop.hdds.utils.db.IteratorType;
import org.apache.hadoop.ozone.util.ClosableIterator;
/**
@@ -33,20 +31,32 @@ public class ManagedRawSSTFileIterator<T> implements ClosableIterator<T> {
private final long nativeHandle;
private final Function<KeyValue, T> transformer;
private final IteratorType type;
+ private boolean closed;
+ private final Buffer keyBuffer;
+ private final Buffer valueBuffer;
-  ManagedRawSSTFileIterator(long nativeHandle, Function<KeyValue, T> transformer, IteratorType type) {
+  ManagedRawSSTFileIterator(String name, long nativeHandle, Function<KeyValue, T> transformer, IteratorType type) {
this.nativeHandle = nativeHandle;
this.transformer = transformer;
this.type = type;
+ this.closed = false;
+ this.keyBuffer = new Buffer(
+ new CodecBuffer.Capacity(name + " iterator-key", 1 << 10),
+        this.type.readKey() ? buffer -> this.getKey(this.nativeHandle, buffer, buffer.position(),
+            buffer.remaining()) : null);
+ this.valueBuffer = new Buffer(
+ new CodecBuffer.Capacity(name + " iterator-value", 4 << 10),
+        this.type.readValue() ? buffer -> this.getValue(this.nativeHandle, buffer, buffer.position(),
+            buffer.remaining()) : null);
}
private native boolean hasNext(long handle);
private native void next(long handle);
- private native byte[] getKey(long handle);
+  private native int getKey(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen);
-  private native byte[] getValue(long handle);
+  private native int getValue(long handle, ByteBuffer buffer, int bufferOffset, int bufferLen);
private native long getSequenceNumber(long handle);
@@ -63,10 +73,10 @@ public T next() {
throw new NoSuchElementException();
}
-    KeyValue keyValue = new KeyValue(this.type.readKey() ? this.getKey(nativeHandle) : null,
+    KeyValue keyValue = new KeyValue(this.type.readKey() ? this.keyBuffer.getFromDb() : null,
        UnsignedLong.fromLongBits(this.getSequenceNumber(this.nativeHandle)), this.getType(nativeHandle),
- this.type.readValue() ? this.getValue(nativeHandle) : null);
+ this.type.readValue() ? this.valueBuffer.getFromDb() : null);
this.next(nativeHandle);
return this.transformer.apply(keyValue);
}
@@ -74,8 +84,13 @@ public T next() {
private native void closeInternal(long handle);
@Override
- public void close() {
- this.closeInternal(this.nativeHandle);
+ public synchronized void close() {
+ if (!closed) {
+ this.closeInternal(this.nativeHandle);
+ keyBuffer.release();
+ valueBuffer.release();
+ }
+ closed = true;
}
/**
@@ -83,21 +98,21 @@ public void close() {
*/
public static final class KeyValue {
- private final byte[] key;
+ private final CodecBuffer key;
private final UnsignedLong sequence;
private final Integer type;
- private final byte[] value;
+ private final CodecBuffer value;
- private KeyValue(byte[] key, UnsignedLong sequence, Integer type,
- byte[] value) {
+ private KeyValue(CodecBuffer key, UnsignedLong sequence, Integer type,
+ CodecBuffer value) {
this.key = key;
this.sequence = sequence;
this.type = type;
this.value = value;
}
- public byte[] getKey() {
- return key == null ? null : Arrays.copyOf(key, key.length);
+ public CodecBuffer getKey() {
+ return this.key;
}
public UnsignedLong getSequence() {
@@ -108,17 +123,17 @@ public Integer getType() {
return type;
}
- public byte[] getValue() {
- return value == null ? null : Arrays.copyOf(value, value.length);
+ public CodecBuffer getValue() {
+ return value;
}
@Override
public String toString() {
return "KeyValue{" +
- "key=" + (key == null ? null : StringUtils.bytes2String(key)) +
+          "key=" + (key == null ? null : StringCodec.get().fromCodecBuffer(key)) +
          ", sequence=" + sequence +
          ", type=" + type +
-          ", value=" + (value == null ? null : StringUtils.bytes2String(value)) +
+          ", value=" + (value == null ? null : StringCodec.get().fromCodecBuffer(value)) +
'}';
}
}
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileReader.java
similarity index 84%
rename from hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java
rename to hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileReader.java
index 49153781e73..c644bd393b5 100644
--- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java
+++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedRawSSTFileReader.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.hdds.utils.db.managed;
+package org.apache.hadoop.hdds.utils.db;
import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;
@@ -24,14 +24,16 @@
import java.util.function.Function;
import org.apache.hadoop.hdds.utils.NativeLibraryLoader;
import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
-import org.apache.hadoop.hdds.utils.db.IteratorType;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* JNI for RocksDB RawSSTFileReader.
*/
-public class ManagedRawSSTFileReader<T> implements Closeable {
+public class ManagedRawSSTFileReader implements Closeable {
private static final Logger LOG =
LoggerFactory.getLogger(ManagedRawSSTFileReader.class);
@@ -62,14 +64,15 @@ public ManagedRawSSTFileReader(final ManagedOptions options, final String fileNa
    this.nativeHandle = this.newRawSSTFileReader(options.getNativeHandle(), fileName, readAheadSize);
}
- public ManagedRawSSTFileIterator<T> newIterator(
+ public <T> ManagedRawSSTFileIterator<T> newIterator(
Function<ManagedRawSSTFileIterator.KeyValue, T> transformerFunction,
ManagedSlice fromSlice, ManagedSlice toSlice, IteratorType type) {
    long fromNativeHandle = fromSlice == null ? 0 : fromSlice.getNativeHandle();
long toNativeHandle = toSlice == null ? 0 : toSlice.getNativeHandle();
LOG.info("Iterating SST file: {} with native lib. " +
-        "LowerBound: {}, UpperBound: {}, type : {}", fileName, fromSlice, toSlice, type);
-    return new ManagedRawSSTFileIterator<>(
+        "LowerBound: {}, UpperBound: {}, type : {} with reader handle: {}", fileName, fromSlice, toSlice, type,
+        this.nativeHandle);
+    return new ManagedRawSSTFileIterator<>(fileName + " " + this.nativeHandle,
        newIterator(this.nativeHandle, fromSlice != null, fromNativeHandle, toSlice != null, toNativeHandle),
transformerFunction, type);
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/package-info.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
similarity index 86%
rename from hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/package-info.java
rename to hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
index dc54a777de5..42b83808542 100644
--- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/package-info.java
+++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
@@ -16,6 +16,6 @@
*/
/**
- * This package contains utility classes related to Managed SST dump tool.
+ * Native rocksdb utilities.
*/
-package org.apache.hadoop.hdds.utils.db.managed;
+package org.apache.hadoop.hdds.utils.db;
diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp
index 1cf22252837..7720e30b411 100644
--- a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp
+++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp
@@ -16,75 +16,87 @@
* limitations under the License.
*/
-#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator.h"
+#include "org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator.h"
#include "rocksdb/options.h"
#include "rocksdb/raw_iterator.h"
#include <string>
#include "cplusplus_to_java_convert.h"
#include <iostream>
-jboolean Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_hasNext(JNIEnv *env, jobject obj,
-                                                                                         jlong native_handle) {
+template <class T>
+static jint copyToDirect(JNIEnv* env, T& source, jobject jtarget, jint jtarget_off, jint jtarget_len);
+
+jboolean Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_hasNext(JNIEnv *env, jobject obj,
+                                                                                jlong native_handle) {
  return static_cast<jboolean>(reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator*>(native_handle)->Valid());
}
-void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_next(JNIEnv *env, jobject obj,
-                                                                                 jlong native_handle) {
+void Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_next(JNIEnv *env, jobject obj,
+                                                                         jlong native_handle) {
  reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator*>(native_handle)->Next();
}
-jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getKey(JNIEnv *env,
-                                                                                          jobject obj,
-                                                                                          jlong native_handle) {
+jint Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getKey(JNIEnv *env,
+                                                                           jobject obj,
+                                                                           jlong native_handle,
+                                                                           jobject jtarget,
+                                                                           jint jtarget_off, jint jtarget_len) {
  ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator*>(native_handle)->key();
- jbyteArray jkey = env->NewByteArray(static_cast<jsize>(slice.size()));
- if (jkey == nullptr) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
- env->SetByteArrayRegion(
- jkey, 0, static_cast<jsize>(slice.size()),
- const_cast<jbyte*>(reinterpret_cast<const jbyte*>(slice.data())));
- return jkey;
+ return copyToDirect(env, slice, jtarget, jtarget_off, jtarget_len);
}
-jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getValue(JNIEnv *env,
-                                                                                            jobject obj,
-                                                                                            jlong native_handle) {
+jint Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getValue(JNIEnv *env,
+                                                                             jobject obj,
+                                                                             jlong native_handle,
+                                                                             jobject jtarget,
+                                                                             jint jtarget_off, jint jtarget_len) {
  ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator*>(native_handle)->value();
- jbyteArray jkey = env->NewByteArray(static_cast<jsize>(slice.size()));
- if (jkey == nullptr) {
- // exception thrown: OutOfMemoryError
- return nullptr;
- }
- env->SetByteArrayRegion(
- jkey, 0, static_cast<jsize>(slice.size()),
- const_cast<jbyte*>(reinterpret_cast<const jbyte*>(slice.data())));
- return jkey;
+ return copyToDirect(env, slice, jtarget, jtarget_off, jtarget_len);
}
-jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getSequenceNumber(JNIEnv *env,
-                                                                                                jobject obj,
-                                                                                                jlong native_handle) {
-  uint64_t sequence_number =
-      reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator*>(native_handle)->sequenceNumber();
+jlong Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getSequenceNumber(JNIEnv *env,
+                                                                                       jobject obj,
+                                                                                       jlong native_handle) {
+  uint64_t sequence_number = reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator*>(native_handle)->sequenceNumber();
jlong result;
std::memcpy(&result, &sequence_number, sizeof(jlong));
return result;
}
-jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getType(JNIEnv *env,
-                                                                                     jobject obj,
-                                                                                     jlong native_handle) {
+jint Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_getType(JNIEnv *env,
+                                                                            jobject obj,
+                                                                            jlong native_handle) {
  uint32_t type = reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator*>(native_handle)->type();
return static_cast<jint>(type);
}
-void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_closeInternal(JNIEnv *env,
-                                                                                           jobject obj,
-                                                                                           jlong native_handle) {
+void Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileIterator_closeInternal(JNIEnv *env,
+                                                                                   jobject obj,
+                                                                                   jlong native_handle) {
delete reinterpret_cast<ROCKSDB_NAMESPACE::RawIterator*>(native_handle);
}
+
+template <class T>
+static jint copyToDirect(JNIEnv* env, T& source, jobject jtarget,
+ jint jtarget_off, jint jtarget_len) {
+ char* target = reinterpret_cast<char*>(env->GetDirectBufferAddress(jtarget));
+  if (target == nullptr || env->GetDirectBufferCapacity(jtarget) < (jtarget_off + jtarget_len)) {
+ jclass exClass = env->FindClass("java/lang/IllegalArgumentException");
+ if (exClass != nullptr) {
+ env->ThrowNew(exClass, "Invalid buffer address or capacity");
+ }
+ return -1;
+ }
+
+ target += jtarget_off;
+
+ const jint cvalue_len = static_cast<jint>(source.size());
+ const jint length = std::min(jtarget_len, cvalue_len);
+
+ memcpy(target, source.data(), length);
+
+ return cvalue_len;
+}
diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp
index f3b8dc02639..ff49ee56f06 100644
--- a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp
+++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader.h"
+#include "org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader.h"
#include "rocksdb/options.h"
#include "rocksdb/raw_sst_file_reader.h"
#include "rocksdb/raw_iterator.h"
@@ -24,7 +24,7 @@
#include "cplusplus_to_java_convert.h"
#include <iostream>
-jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newRawSSTFileReader(JNIEnv *env, jobject obj,
+jlong Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader_newRawSSTFileReader(JNIEnv *env, jobject obj,
jlong options_handle,
jstring jfilename,
jint readahead_size) {
@@ -37,7 +37,7 @@ jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newRa
return GET_CPLUSPLUS_POINTER(raw_sst_file_reader);
}
-jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newIterator(JNIEnv *env, jobject obj,
+jlong Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader_newIterator(JNIEnv *env, jobject obj,
jlong native_handle,
jboolean jhas_from,
jlong from_slice_handle,
@@ -59,7 +59,7 @@ jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newIt
return GET_CPLUSPLUS_POINTER(iterator);
}
-void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_disposeInternal(JNIEnv *env, jobject obj,
+void Java_org_apache_hadoop_hdds_utils_db_ManagedRawSSTFileReader_disposeInternal(JNIEnv *env, jobject obj,
                                                                                   jlong native_handle) {
  delete reinterpret_cast<ROCKSDB_NAMESPACE::RawSstFileReader*>(native_handle);
}
diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
index 3dbbdc35513..b8ac1c132e0 100644
--- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
+++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
@@ -37,7 +37,7 @@
import java.util.Map;
import java.util.stream.Stream;
import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
+import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSSTFileIterator.java
similarity index 86%
rename from hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java
rename to hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSSTFileIterator.java
index e7e2a398e54..fee69e6ba18 100644
--- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java
+++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/TestManagedRawSSTFileIterator.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.hdds.utils.db.managed;
+package org.apache.hadoop.hdds.utils.db;
import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -35,11 +35,15 @@
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
import org.apache.hadoop.hdds.utils.TestUtils;
-import org.apache.hadoop.hdds.utils.db.IteratorType;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Named;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
@@ -90,7 +94,13 @@ private static Stream<Arguments> keyValueFormatArgs() {
            Named.of("Value starting & ending with a number & containing null character & new line character",
                "%1$dvalue\n\0%1$d")),
        Arguments.of(Named.of("Key ending with a number & containing a null character", "key\0%1$d"),
-            Named.of("Value starting & ending with a number & elosed within quotes", "%1$dvalue\r%1$d")))
+            Named.of("Value starting & ending with a number & elosed within quotes", "%1$dvalue\r%1$d")),
+        Arguments.of(Named.of("Key with prefix length 5k of random alphaNumeric string",
+                new StringBuilder(RandomStringUtils.secure().nextAlphanumeric(5 << 10))
+                    .append("key%1$d").toString()),
+            Named.of("Value with prefix length 5k of random alphaNumeric string",
+                new StringBuilder(RandomStringUtils.secure().nextAlphanumeric(5 << 10))
+                    .append("%1$dvalue%1$d").toString())))
        .flatMap(i -> Arrays.stream(IteratorType.values()).map(type -> Arguments.of(i.get()[0], i.get()[1], type)));
}
@@ -109,7 +119,7 @@ public void testSSTDumpIteratorWithKeyFormat(String keyFormat, String valueForma
(v1, v2) -> v2, TreeMap::new));
File file = createSSTFileWithKeys(keys);
try (ManagedOptions options = new ManagedOptions();
-         ManagedRawSSTFileReader<ManagedRawSSTFileIterator.KeyValue> reader = new ManagedRawSSTFileReader<>(
+ ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader(
options, file.getAbsolutePath(), 2 * 1024 * 1024)) {
      List<Optional<String>> testBounds = TestUtils.getTestingBounds(keys.keySet().stream()
          .collect(Collectors.toMap(Pair::getKey, Pair::getValue, (v1, v2) -> v1, TreeMap::new)));
@@ -129,10 +139,10 @@ public void testSSTDumpIteratorWithKeyFormat(String keyFormat, String valueForma
ManagedRawSSTFileIterator.KeyValue r = iterator.next();
assertTrue(expectedKeyItr.hasNext());
          Map.Entry<Pair<String, Integer>, String> expectedKey = expectedKeyItr.next();
-          String key = r.getKey() == null ? null : StringUtils.bytes2String(r.getKey());
+          String key = r.getKey() == null ? null : StringCodec.get().fromCodecBuffer(r.getKey());
          assertEquals(type.readKey() ? expectedKey.getKey().getKey() : null, key);
assertEquals(type.readValue() ? expectedKey.getValue() : null,
-              type.readValue() ? StringUtils.bytes2String(r.getValue()) : r.getValue());
+              type.readValue() ? StringCodec.get().fromCodecBuffer(r.getValue()) : r.getValue());
expectedKeyItr.remove();
}
assertEquals(0, expectedKeys.size());
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedSstFileIterator.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedSstFileIterator.java
new file mode 100644
index 00000000000..abfbd48e347
--- /dev/null
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/ManagedSstFileIterator.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator;
+import org.apache.hadoop.ozone.util.ClosableIterator;
+import org.rocksdb.RocksDBException;
+
+/**
+ * ManagedSstFileIterator is an abstract class designed to provide a managed, resource-safe
+ * iteration over SST (Sorted String Table) files leveraging RocksDB. It implements the
+ * {@link ClosableIterator} interface to support resource management and ensures proper
+ * cleanup of resources upon closure. This class binds together a ManagedSstFileReader,
+ * ManagedSstFileReaderIterator, and Buffers for keys and values, while allowing specific
+ * implementations to define how the iterator values are derived.
+ *
+ * @param <T> The type of the element to be returned by the iterator.
+ */
+abstract class ManagedSstFileIterator<T> implements ClosableIterator<T> {
+ private final ManagedSstFileReader fileReader;
+ private final ManagedSstFileReaderIterator fileReaderIterator;
+ private final IteratorType type;
+ private boolean closed;
+ private final Buffer keyBuffer;
+ private final Buffer valueBuffer;
+
+  ManagedSstFileIterator(String path, ManagedOptions options, ManagedReadOptions readOptions,
+ IteratorType type) throws RocksDatabaseException {
+ try {
+ this.fileReader = new ManagedSstFileReader(options);
+ this.fileReader.open(path);
+      this.fileReaderIterator = ManagedSstFileReaderIterator.managed(fileReader.newIterator(readOptions));
+      fileReaderIterator.get().seekToFirst();
+      this.closed = false;
+      this.type = type;
+      this.keyBuffer = new Buffer(
+          new CodecBuffer.Capacity(path + " iterator-key", 1 << 10),
+          this.type.readKey() ? buffer -> fileReaderIterator.get().key(buffer) : null);
+      this.valueBuffer = new Buffer(
+          new CodecBuffer.Capacity(path + " iterator-value", 4 << 10),
+          this.type.readValue() ? buffer -> fileReaderIterator.get().value(buffer) : null);
+ } catch (RocksDBException e) {
+ throw new RocksDatabaseException("Failed to open SST file: " + path, e);
+ }
+ }
+
+ @Override
+ public synchronized void close() {
+ if (!closed) {
+ this.fileReaderIterator.close();
+ this.fileReader.close();
+ keyBuffer.release();
+ valueBuffer.release();
+ }
+ closed = true;
+ }
+
+ @Override
+ public synchronized boolean hasNext() {
+ return fileReaderIterator.get().isValid();
+ }
+
+ abstract T getIteratorValue(CodecBuffer key, CodecBuffer value);
+
+ @Override
+ public synchronized T next() {
+    T value = getIteratorValue(this.type.readKey() ? keyBuffer.getFromDb() : null,
+ this.type.readValue() ? valueBuffer.getFromDb() : null);
+ fileReaderIterator.get().next();
+ return value;
+ }
+}
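
The key-only reader added to SstFileSetReader below is essentially the smallest useful subclass of this new class. As an illustration only (options and readOptions are assumed to be open ManagedOptions/ManagedReadOptions, the path is hypothetical, the class is package-private so this must live in org.apache.hadoop.hdds.utils.db, and the enclosing method is declared to throw RocksDatabaseException):

    ClosableIterator<String> keyItr = new ManagedSstFileIterator<String>(
        "/tmp/example.sst", options, readOptions, IteratorType.KEY_ONLY) {
      @Override
      String getIteratorValue(CodecBuffer key, CodecBuffer value) {
        // value is null for KEY_ONLY; decode the key buffer into a String
        return StringCodec.get().fromCodecBuffer(key);
      }
    };
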
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/SstFileSetReader.java
similarity index 76%
rename from hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java
rename to hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/SstFileSetReader.java
index 7fbd80cf091..b4c39ccc9c2 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/SstFileSetReader.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-package org.apache.ozone.rocksdb.util;
+package org.apache.hadoop.hdds.utils.db;
import static org.apache.hadoop.hdds.utils.db.IteratorType.KEY_ONLY;
@@ -30,19 +30,11 @@
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.hdds.utils.db.CodecException;
-import org.apache.hadoop.hdds.utils.db.IteratorType;
-import org.apache.hadoop.hdds.utils.db.MinHeapMergeIterator;
-import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
-import org.apache.hadoop.hdds.utils.db.StringCodec;
+import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator.KeyValue;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator.KeyValue;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice;
import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator;
import org.apache.hadoop.ozone.util.ClosableIterator;
import org.rocksdb.RocksDBException;
@@ -116,10 +108,11 @@ protected void init() throws CodecException {
@Override
  protected ClosableIterator<String> getKeyIteratorForFile(String file) throws RocksDatabaseException {
-    return new ManagedSstFileIterator(file, options, readOptions) {
+    return new ManagedSstFileIterator<String>(file, options, readOptions, KEY_ONLY) {
+
      @Override
-      protected String getIteratorValue(ManagedSstFileReaderIterator iterator) {
- return StringCodec.get().fromPersistedFormat(iterator.get().key());
+ String getIteratorValue(CodecBuffer key, CodecBuffer value) {
+ return StringCodec.get().fromCodecBuffer(key);
}
};
}
@@ -159,7 +152,7 @@ protected void init() throws CodecException {
@Override
protected ClosableIterator<String> getKeyIteratorForFile(String file) {
      return new ManagedRawSstFileIterator(file, options, lowerBoundSlice, upperBoundSlice,
-          keyValue -> StringCodec.get().fromPersistedFormat(keyValue.getKey()), KEY_ONLY);
+          keyValue -> StringCodec.get().fromCodecBuffer(keyValue.getKey()), KEY_ONLY);
}
@Override
@@ -172,51 +165,14 @@ public void close() throws UncheckedIOException {
return itr;
}
-  private abstract static class ManagedSstFileIterator implements ClosableIterator<String> {
- private final ManagedSstFileReader fileReader;
- private final ManagedSstFileReaderIterator fileReaderIterator;
-
-    ManagedSstFileIterator(String path, ManagedOptions options, ManagedReadOptions readOptions)
- throws RocksDatabaseException {
- try {
- this.fileReader = new ManagedSstFileReader(options);
- this.fileReader.open(path);
-      this.fileReaderIterator = ManagedSstFileReaderIterator.managed(fileReader.newIterator(readOptions));
- fileReaderIterator.get().seekToFirst();
- } catch (RocksDBException e) {
-        throw new RocksDatabaseException("Failed to open SST file: " + path, e);
- }
- }
-
- @Override
- public void close() {
- this.fileReaderIterator.close();
- this.fileReader.close();
- }
-
- @Override
- public boolean hasNext() {
- return fileReaderIterator.get().isValid();
- }
-
-    protected abstract String getIteratorValue(ManagedSstFileReaderIterator iterator);
-
- @Override
- public String next() {
- String value = getIteratorValue(fileReaderIterator);
- fileReaderIterator.get().next();
- return value;
- }
- }
-
  private static class ManagedRawSstFileIterator implements ClosableIterator<String> {
- private final ManagedRawSSTFileReader<String> fileReader;
+ private final ManagedRawSSTFileReader fileReader;
private final ManagedRawSSTFileIterator<String> fileReaderIterator;
private static final int READ_AHEAD_SIZE = 2 * 1024 * 1024;
    ManagedRawSstFileIterator(String path, ManagedOptions options, ManagedSlice lowerBound, ManagedSlice upperBound,
                              Function<KeyValue, String> keyValueFunction, IteratorType type) {
-      this.fileReader = new ManagedRawSSTFileReader<>(options, path, READ_AHEAD_SIZE);
+      this.fileReader = new ManagedRawSSTFileReader(options, path, READ_AHEAD_SIZE);
      this.fileReaderIterator = fileReader.newIterator(keyValueFunction, lowerBound, upperBound, type);
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index d69515a1123..956a0caac7c 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -19,7 +19,6 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.function.Function.identity;
-import static org.apache.commons.lang3.ArrayUtils.EMPTY_BYTE_ARRAY;
import static org.apache.hadoop.hdds.utils.db.IteratorType.KEY_ONLY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT;
@@ -75,16 +74,17 @@
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
import org.apache.hadoop.hdds.utils.Scheduler;
+import org.apache.hadoop.hdds.utils.db.CodecBuffer;
+import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator;
+import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader;
+import org.apache.hadoop.hdds.utils.db.RDBSstFileWriter;
import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
import org.apache.hadoop.hdds.utils.db.TablePrefixInfo;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter;
import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
import org.apache.ozone.compaction.log.CompactionFileInfo;
import org.apache.ozone.compaction.log.CompactionLogEntry;
@@ -1322,8 +1322,7 @@ public void pruneSstFileValues() {
// Prune file.sst => pruned.sst.tmp
Files.deleteIfExists(prunedSSTFilePath);
-      removeValueFromSSTFile(managedOptions, envOptions, sstFilePath.toFile().getAbsolutePath(),
-          prunedSSTFilePath.toFile().getAbsolutePath());
+      removeValueFromSSTFile(managedOptions, sstFilePath.toFile().getAbsolutePath(), prunedSSTFilePath.toFile());
      // Move pruned.sst.tmp => file.sst and replace existing file atomically.
      try (UncheckedAutoCloseable lock = getBootstrapStateLock().acquireReadLock()) {
@@ -1363,26 +1362,20 @@ public void pruneSstFileValues() {
}
}
-  private void removeValueFromSSTFile(ManagedOptions options, ManagedEnvOptions envOptions,
-      String sstFilePath, String prunedFilePath)
-      throws IOException {
-    try (ManagedRawSSTFileReader<Pair<byte[], Integer>> sstFileReader = new ManagedRawSSTFileReader<>(
-        options, sstFilePath, SST_READ_AHEAD_SIZE);
-        ManagedRawSSTFileIterator<Pair<byte[], Integer>> itr = sstFileReader.newIterator(
+  private void removeValueFromSSTFile(ManagedOptions options, String sstFilePath, File prunedFile) throws IOException {
+    try (ManagedRawSSTFileReader sstFileReader = new ManagedRawSSTFileReader(options, sstFilePath, SST_READ_AHEAD_SIZE);
+        ManagedRawSSTFileIterator<Pair<CodecBuffer, Integer>> itr = sstFileReader.newIterator(
            keyValue -> Pair.of(keyValue.getKey(), keyValue.getType()), null, null, KEY_ONLY);
-        ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(envOptions, options);) {
- sstFileWriter.open(prunedFilePath);
+ RDBSstFileWriter sstFileWriter = new RDBSstFileWriter(prunedFile);
+ CodecBuffer emptyCodecBuffer = CodecBuffer.getEmptyBuffer()) {
while (itr.hasNext()) {
- Pair<byte[], Integer> keyValue = itr.next();
+ Pair<CodecBuffer, Integer> keyValue = itr.next();
if (keyValue.getValue() == 0) {
sstFileWriter.delete(keyValue.getKey());
} else {
- sstFileWriter.put(keyValue.getKey(), EMPTY_BYTE_ARRAY);
+ sstFileWriter.put(keyValue.getKey(), emptyCodecBuffer);
}
}
- sstFileWriter.finish();
- } catch (RocksDBException ex) {
-      throw new RocksDatabaseException("Failed to write pruned entries for " + sstFilePath, ex);
}
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java
index c3129da85c4..4476bcb808d 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBSstFileWriter.java
@@ -28,13 +28,10 @@
import java.util.Queue;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
import org.junit.jupiter.api.io.TempDir;
-import org.rocksdb.RocksDBException;
/**
* Test for RDBSstFileWriter.
@@ -46,7 +43,7 @@ public class TestRDBSstFileWriter {
  @EnabledIfSystemProperty(named = ROCKS_TOOLS_NATIVE_PROPERTY, matches = "true")
@Test
-  public void testSstFileTombstoneCreationWithCodecBufferReuse() throws IOException, RocksDBException {
+  public void testSstFileTombstoneCreationWithCodecBufferReuse() throws IOException {
ManagedRawSSTFileReader.tryLoadLibrary();
Path sstPath = path.resolve("test.sst").toAbsolutePath();
try (CodecBuffer codecBuffer = CodecBuffer.allocateDirect(1024);
@@ -78,8 +75,7 @@ public void testSstFileTombstoneCreationWithCodecBufferReuse() throws IOExceptio
}
Assertions.assertTrue(sstPath.toFile().exists());
try (ManagedOptions options = new ManagedOptions();
-        ManagedRawSSTFileReader<ManagedRawSSTFileIterator.KeyValue> reader = new ManagedRawSSTFileReader<>(options,
-            sstPath.toString(), 1024);
+        ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader(options, sstPath.toString(), 1024);
         ManagedRawSSTFileIterator<ManagedRawSSTFileIterator.KeyValue> itr = reader.newIterator(kv -> kv, null, null,
             IteratorType.KEY_AND_VALUE)) {
@@ -89,7 +85,7 @@ public void testSstFileTombstoneCreationWithCodecBufferReuse() throws IOExceptio
ManagedRawSSTFileIterator.KeyValue kv = itr.next();
assertEquals(idx, kv.getType());
assertEquals(keys.get(idx), keys.get(idx++));
- assertEquals(0, kv.getValue().length);
+ assertEquals(0, kv.getValue().readableBytes());
}
assertEquals(2, idx);
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestSstFileSetReader.java
similarity index 98%
rename from hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java
rename to hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestSstFileSetReader.java
index c20eb8a20ed..fd4bcbb6d90 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/hadoop/hdds/utils/db/TestSstFileSetReader.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-package org.apache.ozone.rocksdb.util;
+package org.apache.hadoop.hdds.utils.db;
import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_PROPERTY;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -39,10 +39,8 @@
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.TestUtils;
-import org.apache.hadoop.hdds.utils.db.CodecException;
import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter;
import org.apache.hadoop.ozone.util.ClosableIterator;
import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
@@ -63,8 +61,8 @@ class TestSstFileSetReader {
// Key prefix containing all characters, to check if all characters can be
// written & read from rocksdb through SSTDumptool
- private static final String KEY_PREFIX = IntStream.range(0, 256).boxed()
- .map(i -> String.format("%c", i))
+ private static final String KEY_PREFIX = IntStream.range(0, 5 << 10).boxed()
+ .map(i -> String.format("%c", i % 256))
.collect(Collectors.joining(""));
/**
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java
index 5c527948fdc..2fde23bb376 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestCompactionDag.java
@@ -63,9 +63,9 @@
import java.util.stream.Stream;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
import org.apache.ozone.compaction.log.CompactionLogEntry;
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index b1867a3d5eb..9c1fb6b0a06 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -44,6 +44,7 @@
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -56,6 +57,7 @@
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStream;
+import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
@@ -90,22 +92,22 @@
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.hdds.utils.db.CodecBuffer;
+import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator;
+import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader;
+import org.apache.hadoop.hdds.utils.db.RDBSstFileWriter;
import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
import org.apache.hadoop.hdds.utils.db.TablePrefixInfo;
import org.apache.hadoop.hdds.utils.db.managed.ManagedCheckpoint;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedFlushOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter;
import org.apache.hadoop.util.Time;
import org.apache.ozone.compaction.log.CompactionFileInfo;
import org.apache.ozone.compaction.log.CompactionLogEntry;
@@ -1608,20 +1610,21 @@ public void testPruneSSTFileValues() throws Exception {
assertEquals(0L, sstFilePruningMetrics.getCompactionsProcessed());
assertEquals(0L, sstFilePruningMetrics.getFilesRemovedTotal());
- List<Pair<byte[], Integer>> keys = new ArrayList<Pair<byte[], Integer>>();
- keys.add(Pair.of("key1".getBytes(UTF_8), Integer.valueOf(1)));
- keys.add(Pair.of("key2".getBytes(UTF_8), Integer.valueOf(0)));
- keys.add(Pair.of("key3".getBytes(UTF_8), Integer.valueOf(1)));
+ List<Pair<String, Integer>> keys = new ArrayList<Pair<String, Integer>>();
+ keys.add(Pair.of("key1", Integer.valueOf(1)));
+ keys.add(Pair.of("key2", Integer.valueOf(0)));
+ keys.add(Pair.of("key3", Integer.valueOf(1)));
String inputFile78 = "000078";
String inputFile73 = "000073";
String outputFile81 = "000081";
// Create src & destination files in backup & activedirectory.
// Pruning job should succeed when pruned temp file is already present.
-    createSSTFileWithKeys(sstBackUpDir + "/" + inputFile78 + SST_FILE_EXTENSION, keys);
-    createSSTFileWithKeys(sstBackUpDir + "/" + inputFile73 + SST_FILE_EXTENSION, keys);
-    createSSTFileWithKeys(sstBackUpDir + PRUNED_SST_FILE_TEMP, keys);
-    createSSTFileWithKeys(activeDbDir + "/" + outputFile81 + SST_FILE_EXTENSION, keys);
+    Path sstBackupDirPath = sstBackUpDir.toPath();
+    createSSTFileWithKeys(sstBackupDirPath.resolve(inputFile78 + SST_FILE_EXTENSION).toFile(), keys);
+    createSSTFileWithKeys(sstBackupDirPath.resolve(inputFile73 + SST_FILE_EXTENSION).toFile(), keys);
+    createSSTFileWithKeys(sstBackupDirPath.resolve(PRUNED_SST_FILE_TEMP).toFile(), keys);
+    createSSTFileWithKeys(activeDbDir.toPath().resolve(outputFile81 + SST_FILE_EXTENSION).toFile(), keys);
// Load compaction log
CompactionLogEntry compactionLogEntry = new CompactionLogEntry(178,
System.currentTimeMillis(),
@@ -1639,15 +1642,21 @@ public void testPruneSSTFileValues() throws Exception {
    // Pruning should not fail a source SST file has been removed by another pruner.
    Files.delete(sstBackUpDir.toPath().resolve(inputFile73 + SST_FILE_EXTENSION));
// Run the SST file pruner.
-    ManagedRawSSTFileIterator mockedRawSSTFileItr = mock(ManagedRawSSTFileIterator.class);
-    Iterator keyItr = keys.iterator();
-    when(mockedRawSSTFileItr.hasNext()).thenReturn(true, true, true, false);
-    when(mockedRawSSTFileItr.next()).thenReturn(keyItr.next(), keyItr.next(), keyItr.next());
-    try (MockedConstruction<ManagedRawSSTFileReader> mockedRawSSTReader = Mockito.mockConstruction(
-        ManagedRawSSTFileReader.class, (mock, context) -> {
-          when(mock.newIterator(any(), any(), any(), any())).thenReturn(mockedRawSSTFileItr);
-          doNothing().when(mock).close();
-        })) {
+
+    try (CodecBuffer keyCodecBuffer = CodecBuffer.allocateDirect(1024);
+        MockedConstruction<ManagedRawSSTFileReader> mockedRawSSTReader = Mockito.mockConstruction(
+            ManagedRawSSTFileReader.class, (mock, context) -> {
+              ManagedRawSSTFileIterator mockedRawSSTFileItr = mock(ManagedRawSSTFileIterator.class);
+              Iterator<Pair<CodecBuffer, Integer>> keyItr = keys.stream().map(i -> {
+                keyCodecBuffer.clear();
+                keyCodecBuffer.put(ByteBuffer.wrap(i.getKey().getBytes(UTF_8)));
+                return Pair.of(keyCodecBuffer, i.getValue());
+              }).iterator();
+              doAnswer(i -> keyItr.hasNext()).when(mockedRawSSTFileItr).hasNext();
+              doAnswer(i -> keyItr.next()).when(mockedRawSSTFileItr).next();
+              when(mock.newIterator(any(), any(), any(), any())).thenReturn(mockedRawSSTFileItr);
+              doNothing().when(mock).close();
+            })) {
rocksDBCheckpointDiffer.pruneSstFileValues();
}
// pruned.sst.tmp should be deleted when pruning job exits successfully.
@@ -1692,22 +1701,18 @@ public void testPruneSSTFileValues() throws Exception {
assertEquals(1L, sstFilePruningMetrics.getFilesRemovedTotal());
}
-  private void createSSTFileWithKeys(String filePath, List<Pair<byte[], Integer>> keys)
-      throws Exception {
-    try (ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(new ManagedEnvOptions(), new ManagedOptions())) {
-      sstFileWriter.open(filePath);
-      Iterator<Pair<byte[], Integer>> itr = keys.iterator();
+  private void createSSTFileWithKeys(File file, List<Pair<String, Integer>> keys) throws RocksDatabaseException {
+ byte[] value = "dummyValue".getBytes(UTF_8);
+ try (RDBSstFileWriter sstFileWriter = new RDBSstFileWriter(file)) {
+ Iterator<Pair<String, Integer>> itr = keys.iterator();
while (itr.hasNext()) {
- Pair<byte[], Integer> entry = itr.next();
+ Pair<String, Integer> entry = itr.next();
if (entry.getValue() == 0) {
- sstFileWriter.delete(entry.getKey());
+ sstFileWriter.delete(entry.getKey().getBytes(UTF_8));
} else {
- sstFileWriter.put(entry.getKey(), "dummyValue".getBytes(UTF_8));
+ sstFileWriter.put(entry.getKey().getBytes(UTF_8), value);
}
}
- sstFileWriter.finish();
- } catch (RocksDBException ex) {
- throw new RocksDatabaseException("Failed to get write " + filePath, ex);
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
index dece2e25894..5f14451fad3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
@@ -101,13 +101,14 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.CompactionLogEntryProto;
import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
+import org.apache.hadoop.hdds.utils.db.CodecBuffer;
import org.apache.hadoop.hdds.utils.db.DBProfile;
import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileIterator;
+import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.hdds.utils.db.RocksDatabase;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -2522,12 +2523,12 @@ public void testSnapshotCompactionDag() throws Exception {
           java.nio.file.Path file = sstBackUpDir.resolve(f.getFileName() + ".sst");
           if (COLUMN_FAMILIES_TO_TRACK_IN_DAG.contains(f.getColumnFamily()) && java.nio.file.Files.exists(file)) {
             assertTrue(f.isPruned());
-            try (ManagedRawSSTFileReader<byte[]> sstFileReader = new ManagedRawSSTFileReader<>(
+            try (ManagedRawSSTFileReader sstFileReader = new ManagedRawSSTFileReader(
                 managedOptions, file.toFile().getAbsolutePath(), 2 * 1024 * 1024);
-                ManagedRawSSTFileIterator<byte[]> itr = sstFileReader.newIterator(
-                    keyValue -> keyValue.getValue(), null, null, KEY_AND_VALUE)) {
+                ManagedRawSSTFileIterator<CodecBuffer> itr = sstFileReader.newIterator(
+                    ManagedRawSSTFileIterator.KeyValue::getValue, null, null, KEY_AND_VALUE)) {
               while (itr.hasNext()) {
-                assertEquals(0, itr.next().length);
+                assertEquals(0, itr.next().readableBytes());
               }
             }
           } else {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
index 0c764f94886..2147fc3ec18 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
@@ -97,11 +97,12 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
import org.apache.hadoop.hdds.utils.db.CodecRegistry;
+import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.RDBStore;
+import org.apache.hadoop.hdds.utils.db.SstFileSetReader;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TablePrefixInfo;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.ozone.OFSPath;
@@ -129,7 +130,6 @@
import org.apache.hadoop.ozone.util.ClosableIterator;
import org.apache.logging.log4j.util.Strings;
import org.apache.ozone.rocksdb.util.SstFileInfo;
-import org.apache.ozone.rocksdb.util.SstFileSetReader;
import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier;
import org.rocksdb.ColumnFamilyDescriptor;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java
index daca3709533..87e0704d10a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/defrag/SnapshotDefragService.java
@@ -57,16 +57,17 @@
import org.apache.hadoop.hdds.utils.db.CodecException;
import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.RDBSstFileWriter;
import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
+import org.apache.hadoop.hdds.utils.db.SstFileSetReader;
import org.apache.hadoop.hdds.utils.db.StringCodec;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.TablePrefixInfo;
import org.apache.hadoop.hdds.utils.db.managed.ManagedCompactRangeOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OmSnapshot;
@@ -88,7 +89,6 @@
import org.apache.hadoop.util.Time;
import org.apache.logging.log4j.util.Strings;
import org.apache.ozone.rocksdb.util.SstFileInfo;
-import org.apache.ozone.rocksdb.util.SstFileSetReader;
import org.apache.ratis.util.UncheckedAutoCloseable;
import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier;
import org.slf4j.Logger;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
index 5d681640249..5a82c3b1591 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
@@ -108,6 +108,7 @@
import org.apache.hadoop.hdds.utils.db.CodecRegistry;
import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.hdds.utils.db.RocksDatabase;
+import org.apache.hadoop.hdds.utils.db.SstFileSetReader;
import org.apache.hadoop.hdds.utils.db.StringInMemoryTestTable;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TablePrefixInfo;
@@ -140,7 +141,6 @@
import org.apache.hadoop.ozone.util.ClosableIterator;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ExitUtil;
-import org.apache.ozone.rocksdb.util.SstFileSetReader;
import org.apache.ratis.util.ExitUtils;
import org.apache.ratis.util.TimeDuration;
import org.junit.jupiter.api.AfterEach;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/defrag/TestSnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/defrag/TestSnapshotDefragService.java
index 6a201ba8863..00c1b73398a 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/defrag/TestSnapshotDefragService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/defrag/TestSnapshotDefragService.java
@@ -84,6 +84,7 @@
import org.apache.hadoop.hdds.utils.db.RDBSstFileWriter;
import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint;
import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
+import org.apache.hadoop.hdds.utils.db.SstFileSetReader;
import org.apache.hadoop.hdds.utils.db.StringCodec;
import org.apache.hadoop.hdds.utils.db.StringInMemoryTestTable;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -108,7 +109,6 @@
import org.apache.hadoop.ozone.upgrade.LayoutFeature;
import org.apache.hadoop.ozone.util.ClosableIterator;
import org.apache.ozone.rocksdb.util.SstFileInfo;
-import org.apache.ozone.rocksdb.util.SstFileSetReader;
import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]