lokeshj1703 commented on a change in pull request #1620:
URL: https://github.com/apache/ozone/pull/1620#discussion_r530254229
##########
File path: hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
##########
@@ -155,24 +178,29 @@ private void createToDeleteBlocks(ContainerSet containerSet,
BlockData kd = new BlockData(blockID);
List<ContainerProtos.ChunkInfo> chunks = Lists.newArrayList();
for (int k = 0; k < numOfChunksPerBlock; k++) {
+ final String chunkName = String.format("block.%d.chunk.%d", j, k);
+ final long offset = k * blockLength;
ContainerProtos.ChunkInfo info =
ContainerProtos.ChunkInfo.newBuilder()
- .setChunkName(blockID.getLocalID() + "_chunk_" + k)
+ .setChunkName(chunkName)
.setLen(blockLength)
- .setOffset(0)
+ .setOffset(offset)
.setChecksumData(Checksum.getNoChecksumDataProto())
.build();
chunks.add(info);
+ ChunkInfo chunkInfo = new ChunkInfo(chunkName, offset,
+ blockLength);
+ ChunkBuffer chunkData = buffer.duplicate(0, (int) blockLength);
+ chunkManager.writeChunk(container, blockID, chunkInfo, chunkData,
+ WRITE_STAGE);
+ chunkManager.writeChunk(container, blockID, chunkInfo, chunkData,
+ COMMIT_STAGE);
}
kd.setChunks(chunks);
metadata.getStore().getBlockDataTable().put(
deleteStateName, kd);
container.getContainerData().incrPendingDeletionBlocks(1);
}
-
container.getContainerData().setKeyCount(numOfBlocksPerContainer);
- container.getContainerData().setBytesUsed(
- blockLength * numOfBlocksPerContainer);
Review comment:
Can we also update the value stored for OzoneConsts.CONTAINER_BYTES_USED in the metadata table? The new value should be `chunkLength * numOfChunksPerBlock * numOfBlocksPerContainer`, e.g. as sketched below.
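Something like this (a sketch only; it assumes `metadata` is still in scope at this point in the helper, and that the `blockLength` local is renamed to `chunkLength` as suggested in my other comment):

```java
// Keep the in-memory counter and the metadata table consistent: the
// container now holds numOfBlocksPerContainer blocks of
// numOfChunksPerBlock chunks each.
long bytesUsed = chunkLength * numOfChunksPerBlock * numOfBlocksPerContainer;
container.getContainerData().setBytesUsed(bytesUsed);
metadata.getStore().getMetadataTable()
    .put(OzoneConsts.CONTAINER_BYTES_USED, bytesUsed);
```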
##########
File path: hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
##########
@@ -114,37 +119,55 @@ public static void init() throws IOException {
}
scmId = UUID.randomUUID().toString();
clusterID = UUID.randomUUID().toString();
+ conf = new OzoneConfiguration();
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
+ datanodeUuid = UUID.randomUUID().toString();
+ volumeSet = new MutableVolumeSet(datanodeUuid, conf);
}
@AfterClass
public static void cleanup() throws IOException {
FileUtils.deleteDirectory(testRoot);
}
+ private static final DispatcherContext WRITE_STAGE =
+ new DispatcherContext.Builder()
+ .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
+
+ private static final DispatcherContext COMMIT_STAGE =
+ new DispatcherContext.Builder()
+ .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
+
/**
* A helper method to create some blocks and put them under deletion
* state for testing. This method directly updates container.db and
* creates some fake chunk files for testing.
*/
private void createToDeleteBlocks(ContainerSet containerSet,
- MutableConfigurationSource conf, int numOfContainers,
+ int numOfContainers,
int numOfBlocksPerContainer,
int numOfChunksPerBlock) throws IOException {
+ ChunkManager chunkManager;
+ if (layout == FILE_PER_BLOCK) {
+ chunkManager = new FilePerBlockStrategy(true);
+ } else {
+ chunkManager = new FilePerChunkStrategy(true, null);
+ }
Review comment:
Let's initialise chunkManager in the init function, e.g. as sketched below.
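One way to do it (a sketch): since `layout` is a Parameterized instance field, it isn't visible from the static `@BeforeClass` `init()`, so this assumes a per-test `@Before` method instead (hypothetical name `setupChunkManager`; needs `import org.junit.Before;`):

```java
private ChunkManager chunkManager;

// Assumed addition: runs before each test, after the Parameterized runner
// has injected `layout` via the constructor.
@Before
public void setupChunkManager() {
  chunkManager = (layout == FILE_PER_BLOCK)
      ? new FilePerBlockStrategy(true)
      : new FilePerChunkStrategy(true, null);
}
```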
##########
File path: hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
##########
@@ -114,37 +119,55 @@ public static void init() throws IOException {
}
scmId = UUID.randomUUID().toString();
clusterID = UUID.randomUUID().toString();
+ conf = new OzoneConfiguration();
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
+ datanodeUuid = UUID.randomUUID().toString();
+ volumeSet = new MutableVolumeSet(datanodeUuid, conf);
}
@AfterClass
public static void cleanup() throws IOException {
FileUtils.deleteDirectory(testRoot);
}
+ private static final DispatcherContext WRITE_STAGE =
+ new DispatcherContext.Builder()
+ .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
+
+ private static final DispatcherContext COMMIT_STAGE =
+ new DispatcherContext.Builder()
+ .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
+
/**
* A helper method to create some blocks and put them under deletion
* state for testing. This method directly updates container.db and
* creates some fake chunk files for testing.
*/
private void createToDeleteBlocks(ContainerSet containerSet,
- MutableConfigurationSource conf, int numOfContainers,
+ int numOfContainers,
int numOfBlocksPerContainer,
int numOfChunksPerBlock) throws IOException {
+ ChunkManager chunkManager;
+ if (layout == FILE_PER_BLOCK) {
+ chunkManager = new FilePerBlockStrategy(true);
+ } else {
+ chunkManager = new FilePerChunkStrategy(true, null);
+ }
+ byte[] arr = randomAlphanumeric(1048576).getBytes(UTF_8);
+ ChunkBuffer buffer = ChunkBuffer.wrap(ByteBuffer.wrap(arr));
for (int x = 0; x < numOfContainers; x++) {
- conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
- testRoot.getAbsolutePath());
long containerID = ContainerTestHelper.getTestContainerID();
- KeyValueContainerData data = new KeyValueContainerData(containerID,
- layout,
- ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(),
- UUID.randomUUID().toString());
+ KeyValueContainerData data =
+ new KeyValueContainerData(containerID, layout,
+ ContainerTestHelper.CONTAINER_MAX_SIZE,
+ UUID.randomUUID().toString(), datanodeUuid);
data.closeContainer();
KeyValueContainer container = new KeyValueContainer(data, conf);
- container.create(new MutableVolumeSet(scmId, clusterID, conf),
+ container.create(volumeSet,
new RoundRobinVolumeChoosingPolicy(), scmId);
containerSet.addContainer(container);
data = (KeyValueContainerData) containerSet.getContainer(
containerID).getContainerData();
-
long blockLength = 100;
Review comment:
Can we rename this to chunkLength?
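For context, the value is the per-chunk length (each `ChunkInfo` above is built with `setLen` of this value), so the rename would read:

```java
// Length of each chunk; the block's total size is
// chunkLength * numOfChunksPerBlock.
long chunkLength = 100;
```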
##########
File path: hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
##########
@@ -38,50 +37,53 @@
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.utils.BackgroundService;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.ChunkBuffer;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy;
+import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
-
+import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.*;
Review comment:
Star imports; please replace them with explicit imports, e.g. as listed below.
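For reference, explicit imports covering the members visible in this diff (any other members pulled in by the wildcards would need listing too):

```java
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
```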
##########
File path: hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
##########
@@ -240,40 +270,37 @@ public void testBlockDeletion() throws Exception {
.get(containerData.get(0).getContainerID()).getContainerData())
.getDeleteTransactionId();
-
+ long containerSpace = containerData.get(0).getBytesUsed();
// Number of deleted blocks in container should be equal to 0 before
// block delete
+
Assert.assertEquals(0, transactionId);
// Ensure there are 3 blocks under deletion and 0 deleted blocks
Assert.assertEquals(3, getUnderDeletionBlocksCount(meta));
- Assert.assertEquals(3,
- meta.getStore().getMetadataTable()
- .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue());
- Assert.assertEquals(0, getDeletedBlocksCount(meta));
+ Assert.assertEquals(3, meta.getStore().getMetadataTable()
+ .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue());
+
+ // Container contains 3 blocks. So, space used by the container
+ // should be greater than zero.
+ Assert.assertTrue(containerSpace > 0);
// An interval will delete 1 * 2 blocks
deleteAndWait(svc, 1);
Review comment:
We can add an assertion after this call.
`Assert.assertTrue(containerData.get(0).getBytesUsed() < containerSpace);`
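In context (a sketch; `containerSpace` is captured before deletion as shown in the hunk above):

```java
// One interval deletes 1 * 2 blocks, so the container's space usage
// must drop below the value captured before deletion.
deleteAndWait(svc, 1);
Assert.assertTrue(containerData.get(0).getBytesUsed() < containerSpace);
```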