szetszwo commented on code in PR #9552:
URL: https://github.com/apache/ozone/pull/9552#discussion_r2659133382


##########
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java:
##########
@@ -135,16 +136,26 @@ public void close() {
     }
   }
 
-  private abstract static class Op implements Closeable {
+  private abstract static class SingleKeyOp extends Op {

Review Comment:
   Move SingleKeyOp below Op.
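   A minimal sketch of the ordering I mean (member bodies elided):
   ```java
   private abstract static class Op implements Closeable {
     // ... existing Op members ...
   }

   private abstract static class SingleKeyOp extends Op {
     // ... existing SingleKeyOp members ...
   }
   ```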



##########
hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBBatchOperation.java:
##########
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.when;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.primitives.UnsignedBytes;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatchForTesting;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatchForTesting.OpType;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatchForTesting.Operation;
+import org.apache.hadoop.ozone.util.ClosableIterator;
+import org.apache.ratis.util.function.CheckedConsumer;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import org.mockito.Mockito;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+
+/**
+ * The TestRDBBatchOperation class provides test cases to validate the functionality of RDB batch operations
+ * in a RocksDB-based backend. It verifies the correct behavior of write operations using batch processing
+ * and ensures the integrity of operations like put and delete when performed in batch mode.
+ */
+public class TestRDBBatchOperation {
+
+  static {
+    ManagedRocksObjectUtils.loadRocksDBLibrary();
+  }
+
+  @TempDir
+  private Path tempDir;
+
+  private static Operation getOperation(String key, String value, OpType opType) {
+    return new Operation(string2Bytes(key), value == null ? null : string2Bytes(value), opType);
+  }
+
+  @Test
+  public void testBatchOperation() throws RocksDatabaseException, CodecException, RocksDBException {
+    try (TrackingUtilManagedWriteBatchForTesting writeBatch = new TrackingUtilManagedWriteBatchForTesting();
+         RDBBatchOperation batchOperation = RDBBatchOperation.newAtomicOperation(writeBatch)) {
+      ColumnFamilyHandle columnFamilyHandle = Mockito.mock(ColumnFamilyHandle.class);
+      RocksDatabase.ColumnFamily columnFamily = Mockito.mock(RocksDatabase.ColumnFamily.class);
+      doAnswer((i) -> {
+        ((ManagedWriteBatch)i.getArgument(0))
+            .put(columnFamilyHandle, (ByteBuffer) i.getArgument(1), (ByteBuffer) i.getArgument(2));
+        return null;
+      }).when(columnFamily).batchPut(any(ManagedWriteBatch.class), any(ByteBuffer.class), any(ByteBuffer.class));
+
+      doAnswer((i) -> {
+        ((ManagedWriteBatch)i.getArgument(0))
+            .delete(columnFamilyHandle, (ByteBuffer) i.getArgument(1));
+        return null;
+      }).when(columnFamily).batchDelete(any(ManagedWriteBatch.class), any(ByteBuffer.class));
+
+      when(columnFamily.getHandle()).thenReturn(columnFamilyHandle);
+      when(columnFamilyHandle.getName()).thenReturn(string2Bytes("test"));
+      when(columnFamily.getName()).thenReturn("test");
+      Codec<String> codec = StringCodec.get();
+      // OP1: This should be skipped in favor of OP9.
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key01"), codec.toDirectCodecBuffer("value01"));
+      // OP2
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key02"), codec.toPersistedFormat("value02"));
+      // OP3: This should be skipped in favor of OP4.
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key03"), codec.toDirectCodecBuffer("value03"));
+      // OP4
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key03"), codec.toPersistedFormat("value04"));
+      // OP5
+      batchOperation.delete(columnFamily, codec.toDirectCodecBuffer("key05"));
+      // OP6
+      batchOperation.delete(columnFamily, codec.toPersistedFormat("key10"));
+      // OP7
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key04"), codec.toDirectCodecBuffer("value04"));
+      // OP8
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key06"), codec.toPersistedFormat("value05"));
+      // OP9
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key01"), codec.toDirectCodecBuffer("value011"));
+
+
+      RocksDatabase db = Mockito.mock(RocksDatabase.class);
+      doNothing().when(db).batchWrite(any());
+      batchOperation.commit(db);
+      Set<Operation> expectedOps = ImmutableSet.of(
+          getOperation("key01", "value011", OpType.PUT_DIRECT),
+          getOperation("key02", "value02", OpType.PUT_DIRECT),
+          getOperation("key03", "value04", OpType.PUT_DIRECT),
+          getOperation("key05", null, OpType.DELETE_DIRECT),
+          getOperation("key10", null, OpType.DELETE_DIRECT),
+          getOperation("key04", "value04", OpType.PUT_DIRECT),
+          getOperation("key06", "value05", OpType.PUT_DIRECT));
+      assertEquals(Collections.singleton("test"), writeBatch.getOperations().keySet());
+      assertEquals(expectedOps, new HashSet<>(writeBatch.getOperations().get("test")));
+    }
+  }
+
+  private DBStore getDBStore(OzoneConfiguration conf, String name, String tableName) throws RocksDatabaseException {
+    return DBStoreBuilder.newBuilder(conf)
+        .setName(name).setPath(tempDir).addTable(tableName).build();
+  }
+
+  private void performPut(Table<String, String> withBatchTable, BatchOperation batchOperation,
+      Table<String, String> withoutBatchTable, String key) throws RocksDatabaseException, CodecException {
+    String value = getRandomString();
+    withBatchTable.putWithBatch(batchOperation, key, value);
+    withoutBatchTable.put(key, value);
+  }
+
+  private void performDelete(Table<String, String> withBatchTable, BatchOperation batchOperation,
+      Table<String, String> withoutBatchTable, String key) throws RocksDatabaseException, CodecException {
+    withBatchTable.deleteWithBatch(batchOperation, key);
+    withoutBatchTable.delete(key);
+  }
+
+  private String getRandomString() {
+    int length = ThreadLocalRandom.current().nextInt(1, 1024);
+    return RandomStringUtils.secure().next(length);
+  }
+
+  private void performOpWithRandomKey(CheckedConsumer<String, IOException> op, Set<String> keySet,
+      List<String> keyList) throws IOException {
+    String key = getRandomString();
+    op.accept(key);
+    if (!keySet.contains(key)) {
+      keyList.add(key);
+      keySet.add(key);
+    }
+  }
+
+  private void performOpWithRandomPreExistingKey(CheckedConsumer<String, IOException> op, List<String> keyList)
+      throws IOException {
+    int randomIndex = ThreadLocalRandom.current().nextInt(0, keyList.size());
+    op.accept(keyList.get(randomIndex));
+  }
+
+  @Test
+  public void testRDBBatchOperationWithRDB() throws IOException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    String tableName = "test";
+    try (DBStore dbStore1 = getDBStore(conf, "WithBatch.db", tableName);
+         DBStore dbStore2 = getDBStore(conf, "WithoutBatch.db", tableName)) {

Review Comment:
   Let's call them dbStoreWithBatch and dbStoreWithoutBatch
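   E.g. (only the names change):
   ```java
   try (DBStore dbStoreWithBatch = getDBStore(conf, "WithBatch.db", tableName);
        DBStore dbStoreWithoutBatch = getDBStore(conf, "WithoutBatch.db", tableName)) {
   ```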



##########
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java:
##########
@@ -336,21 +323,25 @@ void overwriteIfExists(Bytes key, Op op) {
 
       void put(CodecBuffer key, CodecBuffer value) {
         putCount++;
-        // always release the key with the value
-        Bytes keyBytes = Bytes.newBytes(key);
-        overwriteIfExists(keyBytes, new PutOp(key, value, keyBytes));
+        overwriteIfExists(new PutOp(key, value));
       }
 
       void put(byte[] key, byte[] value) {
         putCount++;
-        Bytes keyBytes = new Bytes(key);
-        overwriteIfExists(keyBytes, new ByteArrayPutOp(key, value, keyBytes));
+        CodecBuffer keyBuffer = DIRECT_CODEC_BUFFER_CODEC.fromPersistedFormat(key);
+        CodecBuffer valueBuffer = DIRECT_CODEC_BUFFER_CODEC.fromPersistedFormat(value);
+        overwriteIfExists(new PutOp(keyBuffer, valueBuffer));
       }
 
       void delete(byte[] key) {

Review Comment:
   Remove all the put and delete methods with byte[].  Except for tests, they are only used once, by RDBTable.  Just convert the byte[] to CodecBuffer there.
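   Something like this at the RDBTable call site (a rough sketch only; `family` is a placeholder, and I am assuming the DIRECT_CODEC_BUFFER_CODEC conversion shown above is reachable from RDBTable):
   ```java
   // Sketch: convert byte[] to CodecBuffer once at the call site,
   // then go through the CodecBuffer overloads.
   CodecBuffer keyBuffer = DIRECT_CODEC_BUFFER_CODEC.fromPersistedFormat(key);
   CodecBuffer valueBuffer = DIRECT_CODEC_BUFFER_CODEC.fromPersistedFormat(value);
   batch.put(family, keyBuffer, valueBuffer);
   ```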



##########
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java:
##########
@@ -192,20 +191,20 @@ DBUpdatesWrapper getUpdatesSince(long sequenceNumber, long limitCount)
   * @return a closable iterator over merged key-value pairs, where each key corresponds
   *         to a collection of values from the tables
   */
-  default <KEY> ClosableIterator<KeyValue<KEY, Collection<Object>>> getMergeIterator(
+  default <KEY> ClosableIterator<KeyValue<KEY, List<Object>>> getMergeIterator(

Review Comment:
   This method is currently only used in tests.  What is it for?



##########
hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBBatchOperation.java:
##########
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.when;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.primitives.UnsignedBytes;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatchForTesting;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatchForTesting.OpType;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatchForTesting.Operation;
+import org.apache.hadoop.ozone.util.ClosableIterator;
+import org.apache.ratis.util.function.CheckedConsumer;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import org.mockito.Mockito;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+
+/**
+ * The TestRDBBatchOperation class provides test cases to validate the functionality of RDB batch operations
+ * in a RocksDB-based backend. It verifies the correct behavior of write operations using batch processing
+ * and ensures the integrity of operations like put and delete when performed in batch mode.
+ */
+public class TestRDBBatchOperation {
+
+  static {
+    ManagedRocksObjectUtils.loadRocksDBLibrary();
+  }
+
+  @TempDir
+  private Path tempDir;
+
+  private static Operation getOperation(String key, String value, OpType opType) {
+    return new Operation(string2Bytes(key), value == null ? null : string2Bytes(value), opType);
+  }
+
+  @Test
+  public void testBatchOperation() throws RocksDatabaseException, CodecException, RocksDBException {
+    try (TrackingUtilManagedWriteBatchForTesting writeBatch = new TrackingUtilManagedWriteBatchForTesting();
+         RDBBatchOperation batchOperation = RDBBatchOperation.newAtomicOperation(writeBatch)) {
+      ColumnFamilyHandle columnFamilyHandle = Mockito.mock(ColumnFamilyHandle.class);
+      RocksDatabase.ColumnFamily columnFamily = Mockito.mock(RocksDatabase.ColumnFamily.class);
+      doAnswer((i) -> {
+        ((ManagedWriteBatch)i.getArgument(0))
+            .put(columnFamilyHandle, (ByteBuffer) i.getArgument(1), (ByteBuffer) i.getArgument(2));
+        return null;
+      }).when(columnFamily).batchPut(any(ManagedWriteBatch.class), any(ByteBuffer.class), any(ByteBuffer.class));
+
+      doAnswer((i) -> {
+        ((ManagedWriteBatch)i.getArgument(0))
+            .delete(columnFamilyHandle, (ByteBuffer) i.getArgument(1));
+        return null;
+      }).when(columnFamily).batchDelete(any(ManagedWriteBatch.class), any(ByteBuffer.class));
+
+      when(columnFamily.getHandle()).thenReturn(columnFamilyHandle);
+      when(columnFamilyHandle.getName()).thenReturn(string2Bytes("test"));
+      when(columnFamily.getName()).thenReturn("test");
+      Codec<String> codec = StringCodec.get();
+      // OP1: This should be skipped in favor of OP9.
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key01"), codec.toDirectCodecBuffer("value01"));
+      // OP2
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key02"), codec.toPersistedFormat("value02"));
+      // OP3: This should be skipped in favor of OP4.
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key03"), codec.toDirectCodecBuffer("value03"));
+      // OP4
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key03"), codec.toPersistedFormat("value04"));
+      // OP5
+      batchOperation.delete(columnFamily, codec.toDirectCodecBuffer("key05"));
+      // OP6
+      batchOperation.delete(columnFamily, codec.toPersistedFormat("key10"));
+      // OP7
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key04"), codec.toDirectCodecBuffer("value04"));
+      // OP8
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key06"), codec.toPersistedFormat("value05"));
+      // OP9
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key01"), codec.toDirectCodecBuffer("value011"));
+
+
+      RocksDatabase db = Mockito.mock(RocksDatabase.class);
+      doNothing().when(db).batchWrite(any());
+      batchOperation.commit(db);
+      Set<Operation> expectedOps = ImmutableSet.of(
+          getOperation("key01", "value011", OpType.PUT_DIRECT),
+          getOperation("key02", "value02", OpType.PUT_DIRECT),
+          getOperation("key03", "value04", OpType.PUT_DIRECT),
+          getOperation("key05", null, OpType.DELETE_DIRECT),
+          getOperation("key10", null, OpType.DELETE_DIRECT),
+          getOperation("key04", "value04", OpType.PUT_DIRECT),
+          getOperation("key06", "value05", OpType.PUT_DIRECT));
+      assertEquals(Collections.singleton("test"), writeBatch.getOperations().keySet());
+      assertEquals(expectedOps, new HashSet<>(writeBatch.getOperations().get("test")));
+    }
+  }
+
+  private DBStore getDBStore(OzoneConfiguration conf, String name, String tableName) throws RocksDatabaseException {
+    return DBStoreBuilder.newBuilder(conf)
+        .setName(name).setPath(tempDir).addTable(tableName).build();
+  }
+
+  private void performPut(Table<String, String> withBatchTable, BatchOperation batchOperation,
+      Table<String, String> withoutBatchTable, String key) throws RocksDatabaseException, CodecException {
+    String value = getRandomString();
+    withBatchTable.putWithBatch(batchOperation, key, value);
+    withoutBatchTable.put(key, value);
+  }
+
+  private void performDelete(Table<String, String> withBatchTable, BatchOperation batchOperation,
+      Table<String, String> withoutBatchTable, String key) throws RocksDatabaseException, CodecException {
+    withBatchTable.deleteWithBatch(batchOperation, key);
+    withoutBatchTable.delete(key);
+  }
+
+  private String getRandomString() {
+    int length = ThreadLocalRandom.current().nextInt(1, 1024);
+    return RandomStringUtils.secure().next(length);
+  }
+
+  private void performOpWithRandomKey(CheckedConsumer<String, IOException> op, Set<String> keySet,
+      List<String> keyList) throws IOException {
+    String key = getRandomString();
+    op.accept(key);
+    if (!keySet.contains(key)) {
+      keyList.add(key);
+      keySet.add(key);
+    }
+  }
+
+  private void performOpWithRandomPreExistingKey(CheckedConsumer<String, IOException> op, List<String> keyList)
+      throws IOException {
+    int randomIndex = ThreadLocalRandom.current().nextInt(0, keyList.size());
+    op.accept(keyList.get(randomIndex));
+  }
+
+  @Test
+  public void testRDBBatchOperationWithRDB() throws IOException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    String tableName = "test";
+    try (DBStore dbStore1 = getDBStore(conf, "WithBatch.db", tableName);
+         DBStore dbStore2 = getDBStore(conf, "WithoutBatch.db", tableName)) {
+      try (BatchOperation batchOperation = dbStore1.initBatchOperation()) {
+        Table<String, String> withBatchTable = dbStore1.getTable(tableName, StringCodec.get(), StringCodec.get());
+        Table<String, String> withoutBatchTable = dbStore2.getTable(tableName, StringCodec.get(), StringCodec.get());
+        List<String> keyList = new ArrayList<>();
+        Set<String> keySet = new HashSet<>();
+        List<CheckedConsumer<String, IOException>> ops = Arrays.asList(
+            (key) -> performPut(withBatchTable, batchOperation, withoutBatchTable, key),
+            (key) -> performDelete(withBatchTable, batchOperation, withoutBatchTable, key));
+        for (int i = 0; i < 30000; i++) {
+          CheckedConsumer<String, IOException> op = ops.get(ThreadLocalRandom.current().nextInt(ops.size()));
+          boolean performWithPreExistingKey = ThreadLocalRandom.current().nextBoolean();
+          if (performWithPreExistingKey && !keyList.isEmpty()) {
+            performOpWithRandomPreExistingKey(op, keyList);
+          } else {
+            performOpWithRandomKey(op, keySet, keyList);
+          }
+        }
+        dbStore1.commitBatchOperation(batchOperation);
+      }
+      Table<byte[], ?> withBatchTable = dbStore1.getTable(tableName, ByteArrayCodec.get(), StringCodec.get());
+      Table<byte[], ?> withoutBatchTable = dbStore2.getTable(tableName, ByteArrayCodec.get(), StringCodec.get());
+      try (ClosableIterator<KeyValue<byte[], List<Object>>> itr = dbStore1.getMergeIterator(

Review Comment:
   Use two iterators instead of getMergeIterator(..).  How do you know that getMergeIterator(..) iterates the tables correctly?
   
   BTW, the getMergeIterator API does not make sense:
   - How could dbStore1.getMergeIterator(..) get an iterator of withoutBatchTable when withoutBatchTable is not even in dbStore1!?
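   Roughly (a sketch, assuming Table#iterator(); the assertions are the usual JUnit ones):
   ```java
   try (TableIterator<byte[], ? extends KeyValue<byte[], String>> expected = withoutBatchTable.iterator();
        TableIterator<byte[], ? extends KeyValue<byte[], String>> actual = withBatchTable.iterator()) {
     while (expected.hasNext() && actual.hasNext()) {
       KeyValue<byte[], String> e = expected.next();
       KeyValue<byte[], String> a = actual.next();
       assertArrayEquals(e.getKey(), a.getKey());
       assertEquals(e.getValue(), a.getValue());
     }
     // Both tables must be exhausted together.
     assertFalse(expected.hasNext());
     assertFalse(actual.hasNext());
   }
   ```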



##########
hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBBatchOperation.java:
##########
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.when;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.primitives.UnsignedBytes;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatchForTesting;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatchForTesting.OpType;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatchForTesting.Operation;
+import org.apache.hadoop.ozone.util.ClosableIterator;
+import org.apache.ratis.util.function.CheckedConsumer;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import org.mockito.Mockito;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+
+/**
+ * The TestRDBBatchOperation class provides test cases to validate the functionality of RDB batch operations
+ * in a RocksDB-based backend. It verifies the correct behavior of write operations using batch processing
+ * and ensures the integrity of operations like put and delete when performed in batch mode.
+ */
+public class TestRDBBatchOperation {
+
+  static {
+    ManagedRocksObjectUtils.loadRocksDBLibrary();
+  }
+
+  @TempDir
+  private Path tempDir;
+
+  private static Operation getOperation(String key, String value, OpType opType) {
+    return new Operation(string2Bytes(key), value == null ? null : string2Bytes(value), opType);
+  }
+
+  @Test
+  public void testBatchOperation() throws RocksDatabaseException, CodecException, RocksDBException {
+    try (TrackingUtilManagedWriteBatchForTesting writeBatch = new TrackingUtilManagedWriteBatchForTesting();
+         RDBBatchOperation batchOperation = RDBBatchOperation.newAtomicOperation(writeBatch)) {
+      ColumnFamilyHandle columnFamilyHandle = Mockito.mock(ColumnFamilyHandle.class);
+      RocksDatabase.ColumnFamily columnFamily = Mockito.mock(RocksDatabase.ColumnFamily.class);
+      doAnswer((i) -> {
+        ((ManagedWriteBatch)i.getArgument(0))
+            .put(columnFamilyHandle, (ByteBuffer) i.getArgument(1), (ByteBuffer) i.getArgument(2));
+        return null;
+      }).when(columnFamily).batchPut(any(ManagedWriteBatch.class), any(ByteBuffer.class), any(ByteBuffer.class));
+
+      doAnswer((i) -> {
+        ((ManagedWriteBatch)i.getArgument(0))
+            .delete(columnFamilyHandle, (ByteBuffer) i.getArgument(1));
+        return null;
+      }).when(columnFamily).batchDelete(any(ManagedWriteBatch.class), any(ByteBuffer.class));
+
+      when(columnFamily.getHandle()).thenReturn(columnFamilyHandle);
+      when(columnFamilyHandle.getName()).thenReturn(string2Bytes("test"));
+      when(columnFamily.getName()).thenReturn("test");
+      Codec<String> codec = StringCodec.get();
+      // OP1: This should be skipped in favor of OP9.
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key01"), codec.toDirectCodecBuffer("value01"));
+      // OP2
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key02"), codec.toPersistedFormat("value02"));
+      // OP3: This should be skipped in favor of OP4.
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key03"), codec.toDirectCodecBuffer("value03"));
+      // OP4
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key03"), codec.toPersistedFormat("value04"));
+      // OP5
+      batchOperation.delete(columnFamily, codec.toDirectCodecBuffer("key05"));
+      // OP6
+      batchOperation.delete(columnFamily, codec.toPersistedFormat("key10"));
+      // OP7
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key04"), codec.toDirectCodecBuffer("value04"));
+      // OP8
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key06"), codec.toPersistedFormat("value05"));
+      // OP9
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key01"), codec.toDirectCodecBuffer("value011"));
+
+
+      RocksDatabase db = Mockito.mock(RocksDatabase.class);
+      doNothing().when(db).batchWrite(any());
+      batchOperation.commit(db);
+      Set<Operation> expectedOps = ImmutableSet.of(
+          getOperation("key01", "value011", OpType.PUT_DIRECT),
+          getOperation("key02", "value02", OpType.PUT_DIRECT),
+          getOperation("key03", "value04", OpType.PUT_DIRECT),
+          getOperation("key05", null, OpType.DELETE_DIRECT),
+          getOperation("key10", null, OpType.DELETE_DIRECT),
+          getOperation("key04", "value04", OpType.PUT_DIRECT),
+          getOperation("key06", "value05", OpType.PUT_DIRECT));
+      assertEquals(Collections.singleton("test"), writeBatch.getOperations().keySet());
+      assertEquals(expectedOps, new HashSet<>(writeBatch.getOperations().get("test")));
+    }
+  }
+
+  private DBStore getDBStore(OzoneConfiguration conf, String name, String tableName) throws RocksDatabaseException {
+    return DBStoreBuilder.newBuilder(conf)
+        .setName(name).setPath(tempDir).addTable(tableName).build();
+  }
+
+  private void performPut(Table<String, String> withBatchTable, BatchOperation batchOperation,
+      Table<String, String> withoutBatchTable, String key) throws RocksDatabaseException, CodecException {
+    String value = getRandomString();
+    withBatchTable.putWithBatch(batchOperation, key, value);
+    withoutBatchTable.put(key, value);
+  }
+
+  private void performDelete(Table<String, String> withBatchTable, BatchOperation batchOperation,
+      Table<String, String> withoutBatchTable, String key) throws RocksDatabaseException, CodecException {
+    withBatchTable.deleteWithBatch(batchOperation, key);
+    withoutBatchTable.delete(key);
+  }
+
+  private String getRandomString() {
+    int length = ThreadLocalRandom.current().nextInt(1, 1024);
+    return RandomStringUtils.secure().next(length);

Review Comment:
   Why secure?
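   If cryptographic randomness is not needed here, a non-secure generator should do, e.g. (assuming the commons-lang3 version in use has insecure()):
   ```java
   return RandomStringUtils.insecure().next(length);
   ```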



##########
hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBBatchOperation.java:
##########
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.when;
+
+import com.google.common.collect.ImmutableSet;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatch;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatch.OpType;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatch.Operation;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+
+/**
+ * Test class for verifying batch operations with delete ranges using the
+ * RDBBatchOperation and MockedConstruction of ManagedWriteBatch.
+ *
+ * This test class includes:
+ * - Mocking and tracking of operations including put, delete, and delete range
+ *   within a batch operation.
+ * - Validation of committed operations using assertions on collected data.
+ * - Ensures that the batch operation interacts correctly with the
+ *   RocksDatabase and ColumnFamilyHandle components.
+ *
+ * The test method includes:
+ * 1. Setup of mocked ColumnFamilyHandle and RocksDatabase.ColumnFamily.
+ * 2. Mocking of methods to track operations performed on the batch.
+ */
+public class TestRDBBatchOperation {
+
+  static {
+    ManagedRocksObjectUtils.loadRocksDBLibrary();
+  }
+
+  private static Operation getOperation(String key, String value, OpType opType) {
+    return new Operation(string2Bytes(key), value == null ? null : string2Bytes(value), opType);
+  }
+
+  @Test
+  public void testBatchOperation() throws RocksDatabaseException, CodecException, RocksDBException {
+    try (TrackingUtilManagedWriteBatch writeBatch = new TrackingUtilManagedWriteBatch();
+        RDBBatchOperation batchOperation = RDBBatchOperation.newAtomicOperation(writeBatch)) {
+      ColumnFamilyHandle columnFamilyHandle = Mockito.mock(ColumnFamilyHandle.class);
+      RocksDatabase.ColumnFamily columnFamily = Mockito.mock(RocksDatabase.ColumnFamily.class);
+      doAnswer((i) -> {
+        ((ManagedWriteBatch)i.getArgument(0))
+            .put(columnFamilyHandle, (ByteBuffer) i.getArgument(1), (ByteBuffer) i.getArgument(2));
+        return null;
+      }).when(columnFamily).batchPut(any(ManagedWriteBatch.class), any(ByteBuffer.class), any(ByteBuffer.class));
+
+      doAnswer((i) -> {
+        ((ManagedWriteBatch)i.getArgument(0))
+            .delete(columnFamilyHandle, (ByteBuffer) i.getArgument(1));
+        return null;
+      }).when(columnFamily).batchDelete(any(ManagedWriteBatch.class), any(ByteBuffer.class));
+
+      when(columnFamily.getHandle()).thenReturn(columnFamilyHandle);
+      when(columnFamilyHandle.getName()).thenReturn(string2Bytes("test"));
+      when(columnFamily.getName()).thenReturn("test");
+      Codec<String> codec = StringCodec.get();
+      // OP1: This should be skipped in favor of OP9.
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key01"), codec.toDirectCodecBuffer("value01"));
+      // OP2
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key02"), codec.toPersistedFormat("value02"));
+      // OP3: This should be skipped in favor of OP4.
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key03"), codec.toDirectCodecBuffer("value03"));
+      // OP4
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key03"), codec.toPersistedFormat("value04"));
+      // OP5
+      batchOperation.delete(columnFamily, codec.toDirectCodecBuffer("key05"));
+      // OP6
+      batchOperation.delete(columnFamily, codec.toPersistedFormat("key10"));
+      // OP7
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key04"), codec.toDirectCodecBuffer("value04"));
+      // OP8
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key06"), codec.toPersistedFormat("value05"));
+      // OP9
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key01"), codec.toDirectCodecBuffer("value011"));

Review Comment:
   Do we still need this hardcoded test?  How about removing it?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
