szetszwo commented on code in PR #9552: URL: https://github.com/apache/ozone/pull/9552#discussion_r2653727734
##########
hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBBatchOperation.java:
##########
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.when;
+
+import com.google.common.collect.ImmutableSet;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatch;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatch.OpType;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatch.Operation;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+
+/**
+ * Test class for verifying batch operations with delete ranges using the

Review Comment:
   Do we have "delete ranges" here?

##########
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java:
##########
@@ -136,15 +137,29 @@ public void close() {
   }
 
   private abstract static class Op implements Closeable {
+    private final CodecBuffer keyBuffer;
     private final Bytes keyBytes;
+    private final AtomicBoolean closed = new AtomicBoolean(false);
+
+    private Op(CodecBuffer keyBuffer) {
+      this.keyBuffer = keyBuffer;
+      this.keyBytes = keyBuffer == null ? null : Bytes.newBytes(keyBuffer);

Review Comment:
   Actually, deleteRange is a very different operation. We should either
   - not make it extend Op, or
   - add one more intermediate class, say SingleKeyOp. Then, DeleteRangeOp extends Op and all other ops extend SingleKeyOp.

   Allowing null definitely is not a good design.
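
A minimal sketch of the second option, assuming the CodecBuffer, Bytes and AtomicBoolean pieces already visible in this hunk (plus java.util.Objects); the SingleKeyOp and DeleteRangeOp shapes below are illustrative, not the PR's code:

```java
// Sketch only: the proposed hierarchy, with key handling pushed down so no null keys exist.
private abstract static class Op implements Closeable {
  private final AtomicBoolean closed = new AtomicBoolean(false);

  @Override
  public void close() {
    // Subclasses override to release their CodecBuffers, then call super.close().
    closed.set(true);
  }
}

/** Ops addressing exactly one key (put, delete, merge); the key is mandatory. */
private abstract static class SingleKeyOp extends Op {
  private final CodecBuffer keyBuffer;
  private final Bytes keyBytes;

  SingleKeyOp(CodecBuffer keyBuffer) {
    this.keyBuffer = Objects.requireNonNull(keyBuffer, "keyBuffer == null");
    this.keyBytes = Bytes.newBytes(keyBuffer);
  }
}

/** Range deletes carry their own begin/end keys and extend Op directly. */
private static class DeleteRangeOp extends Op {
  private final CodecBuffer beginKey;
  private final CodecBuffer endKey;

  DeleteRangeOp(CodecBuffer beginKey, CodecBuffer endKey) {
    this.beginKey = Objects.requireNonNull(beginKey, "beginKey == null");
    this.endKey = Objects.requireNonNull(endKey, "endKey == null");
  }
}
```

With this split, every SingleKeyOp is guaranteed to have a key, so the `keyBuffer == null ? null : ...` branching disappears.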

##########
hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBBatchOperation.java:
##########
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.when;
+
+import com.google.common.collect.ImmutableSet;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatch;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatch.OpType;
+import org.apache.hadoop.hdds.utils.db.managed.TrackingUtilManagedWriteBatch.Operation;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+
+/**
+ * Test class for verifying batch operations with delete ranges using the
+ * RDBBatchOperation and MockedConstruction of ManagedWriteBatch.
+ *
+ * This test class includes:
+ * - Mocking and tracking of operations including put, delete, and delete range
+ *   within a batch operation.
+ * - Validation of committed operations using assertions on collected data.
+ * - Ensures that the batch operation interacts correctly with the
+ *   RocksDatabase and ColumnFamilyHandle components.
+ *
+ * The test method includes:
+ * 1. Setup of mocked ColumnFamilyHandle and RocksDatabase.ColumnFamily.
+ * 2. Mocking of methods to track operations performed on*/
+public class TestRDBBatchOperation {
+
+  static {
+    ManagedRocksObjectUtils.loadRocksDBLibrary();
+  }
+
+  private static Operation getOperation(String key, String value, OpType opType) {
+    return new Operation(string2Bytes(key), value == null ?
+        null : string2Bytes(value), opType);
+  }
+
+  @Test
+  public void testBatchOperation() throws RocksDatabaseException, CodecException, RocksDBException {
+    try (TrackingUtilManagedWriteBatch writeBatch = new TrackingUtilManagedWriteBatch();
+        RDBBatchOperation batchOperation = RDBBatchOperation.newAtomicOperation(writeBatch)) {
+      ColumnFamilyHandle columnFamilyHandle = Mockito.mock(ColumnFamilyHandle.class);
+      RocksDatabase.ColumnFamily columnFamily = Mockito.mock(RocksDatabase.ColumnFamily.class);
+      doAnswer((i) -> {
+        ((ManagedWriteBatch) i.getArgument(0))
+            .put(columnFamilyHandle, (ByteBuffer) i.getArgument(1), (ByteBuffer) i.getArgument(2));
+        return null;
+      }).when(columnFamily).batchPut(any(ManagedWriteBatch.class), any(ByteBuffer.class), any(ByteBuffer.class));
+
+      doAnswer((i) -> {
+        ((ManagedWriteBatch) i.getArgument(0))
+            .delete(columnFamilyHandle, (ByteBuffer) i.getArgument(1));
+        return null;
+      }).when(columnFamily).batchDelete(any(ManagedWriteBatch.class), any(ByteBuffer.class));
+
+      when(columnFamily.getHandle()).thenReturn(columnFamilyHandle);
+      when(columnFamilyHandle.getName()).thenReturn(string2Bytes("test"));
+      when(columnFamily.getName()).thenReturn("test");
+      Codec<String> codec = StringCodec.get();
+      // OP1: This should be skipped in favor of OP9.
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key01"), codec.toDirectCodecBuffer("value01"));
+      // OP2
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key02"), codec.toPersistedFormat("value02"));
+      // OP3: This should be skipped in favor of OP4.
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key03"), codec.toDirectCodecBuffer("value03"));
+      // OP4
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key03"), codec.toPersistedFormat("value04"));
+      // OP5
+      batchOperation.delete(columnFamily, codec.toDirectCodecBuffer("key05"));
+      // OP6
+      batchOperation.delete(columnFamily, codec.toPersistedFormat("key10"));
+      // OP7
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key04"), codec.toDirectCodecBuffer("value04"));
+      // OP8
+      batchOperation.put(columnFamily, codec.toPersistedFormat("key06"), codec.toPersistedFormat("value05"));
+      // OP9
+      batchOperation.put(columnFamily, codec.toDirectCodecBuffer("key01"), codec.toDirectCodecBuffer("value011"));

Review Comment:
   This hard-coded test is too simple. How about implementing [this idea](https://github.com/apache/ozone/pull/8774#discussion_r2644284592)?
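
The idea linked above is not quoted in this thread, so the following is only a hedged sketch of one way to make the test less hard-coded: drive the batch with pseudo-random puts and deletes over a small key space, mirror every call into a plain TreeMap, and compare that model against what actually reached the tracking write batch. It reuses the mocked columnFamily/codec setup from testBatchOperation(); the RocksDatabase mock `db` and the getOperations()/getKey()/getValue()/commit(db) accessors are assumed names, not necessarily the PR's API:

```java
// Hedged sketch, not the PR's code: a randomized complement to the hard-coded test above.
// Assumes the same mocked columnFamily/columnFamilyHandle as testBatchOperation(), a mocked
// RocksDatabase `db`, and java.util.{Random, TreeMap, List, Map} imports.
@Test
public void testRandomizedBatchOperation() throws Exception {
  final Random random = new Random(20240101L);          // fixed seed keeps failures reproducible
  final Codec<String> codec = StringCodec.get();
  final Map<String, String> expected = new TreeMap<>(); // model of the batch; null value = delete

  try (TrackingUtilManagedWriteBatch writeBatch = new TrackingUtilManagedWriteBatch();
      RDBBatchOperation batchOperation = RDBBatchOperation.newAtomicOperation(writeBatch)) {
    // ... same columnFamily mocking as in testBatchOperation() ...
    for (int i = 0; i < 1000; i++) {
      final String key = "key" + random.nextInt(100);   // small key space forces overwrites
      if (random.nextBoolean()) {
        final String value = "value" + i;
        batchOperation.put(columnFamily, codec.toDirectCodecBuffer(key), codec.toDirectCodecBuffer(value));
        expected.put(key, value);
      } else {
        batchOperation.delete(columnFamily, codec.toPersistedFormat(key));
        expected.put(key, null);
      }
    }
    batchOperation.commit(db);

    // Deduplication check: at most one operation per distinct key should reach RocksDB.
    final List<Operation> tracked = writeBatch.getOperations().get("test");
    assertEquals(expected.size(), tracked.size());

    // Replay what actually reached the write batch and compare it with the model.
    final Map<String, String> committed = new TreeMap<>();
    for (Operation op : tracked) {
      committed.put(bytes2String(op.getKey()),
          op.getValue() == null ? null : bytes2String(op.getValue()));
    }
    assertEquals(expected, committed);
  }
}
```

A fixed seed keeps failures reproducible while still exercising overwrite, delete-after-put and put-after-delete orderings that nine hand-written operations cannot cover.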

##########
hadoop-hdds/managed-rocksdb/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TrackingUtilManagedWriteBatch.java:
##########
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db.managed;
+
+import static org.apache.hadoop.hdds.StringUtils.bytes2String;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+
+/**
+ * The TrackingUtilManagedWriteBatch class extends ManagedWriteBatch to provide functionality
+ * for tracking operations in a managed write batch context. Operations such as put, delete,
+ * merge, and delete range are managed and tracked, along with their corresponding operation types.
+ *
+ * This class supports direct and indirect operation types, delineated in the OpType enumeration.
+ * Direct operations are created using ByteBuffers while indirect operations are created using
+ * byte arrays.
+ */
+public class TrackingUtilManagedWriteBatch extends ManagedWriteBatch {

Review Comment:
   Rename it to TrackingUtilManagedWriteBatchForTesting.

##########
hadoop-hdds/managed-rocksdb/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TrackingUtilManagedWriteBatch.java:
##########
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db.managed;
+
+import static org.apache.hadoop.hdds.StringUtils.bytes2String;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+
+/**
+ * The TrackingUtilManagedWriteBatch class extends ManagedWriteBatch to provide functionality
+ * for tracking operations in a managed write batch context. Operations such as put, delete,
+ * merge, and delete range are managed and tracked, along with their corresponding operation types.
+ *
+ * This class supports direct and indirect operation types, delineated in the OpType enumeration.
+ * Direct operations are created using ByteBuffers while indirect operations are created using
+ * byte arrays.
+ */
+public class TrackingUtilManagedWriteBatch extends ManagedWriteBatch {
+
+  private final Map<String, List<Operation>> operations = new HashMap<>();
+
+  /**
+   * The OpType enumeration defines the different types of operations performed in a batch.
+   */
+  public enum OpType {
+    PUT_DIRECT,
+    DELETE_DIRECT,
+    MERGE_DIRECT,
+    DELETE_RANGE_INDIRECT,
+    PUT_INDIRECT,
+    DELETE_INDIRECT,
+    MERGE_INDIRECT,

Review Comment:
   What is INDIRECT? Check the doc and don't invent your own terms.
   - https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html
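
For reference, the linked ByteBuffer javadoc only distinguishes direct and non-direct (heap, array-backed) buffers; it has no notion of an "indirect" buffer. One hedged option is to name the constants after the WriteBatch overload they track instead, as sketched below; the names are suggestions, not part of the PR:

```java
/**
 * Sketch only: possible replacement names for OpType. The *_BYTE_BUFFER constants
 * would mark the ByteBuffer-based WriteBatch overloads and *_BYTE_ARRAY the byte[]
 * overloads; java.nio.ByteBuffer itself only defines direct vs. non-direct buffers.
 */
public enum OpType {
  PUT_BYTE_BUFFER,
  DELETE_BYTE_BUFFER,
  MERGE_BYTE_BUFFER,
  DELETE_RANGE_BYTE_ARRAY,
  PUT_BYTE_ARRAY,
  DELETE_BYTE_ARRAY,
  MERGE_BYTE_ARRAY
}
```

That keeps the enum aligned with the two WriteBatch method families the class actually wraps (ByteBuffer arguments vs. byte[] arguments).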
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.
