errose28 commented on code in PR #7943:
URL: https://github.com/apache/ozone/pull/7943#discussion_r1978026416
##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java:
##########
@@ -196,6 +200,65 @@ public void testPutAndGetBlock(ContainerTestVersionInfo versionInfo)
}
+ @ContainerTestVersionInfo.ContainerTest
+ public void testPutBlockForClosed(ContainerTestVersionInfo versionInfo)
+ throws Exception {
+ initTest(versionInfo);
+ KeyValueContainerData containerData = keyValueContainer.getContainerData();
+ assertEquals(0, containerData.getBlockCount());
+ keyValueContainer.close();
+ assertEquals(CLOSED, keyValueContainer.getContainerState());
+ // 1. Put Block with bcsId = 2, Overwrite = true
+ blockManager.putBlockForClosedContainer(keyValueContainer, blockData1, true);
Review Comment:
Let's create this block from scratch in this test. If something else changes this class-level field later, it will affect how this test works and what it covers.
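For illustration, the local setup could look roughly like this (a sketch only; `createBlockData` is the helper already used later in this test, and the argument values are placeholders rather than the actual values of the class-level field):
```java
// Hypothetical local construction so the test no longer depends on the shared
// class-level blockData1 (placeholder arguments, mirroring the style of the
// createBlockData calls used for blockData2/blockData3 below).
BlockData blockData1 = createBlockData(1L, 1L, 1, 0, 2048, 1);
```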
##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java:
##########
@@ -196,6 +200,65 @@ public void testPutAndGetBlock(ContainerTestVersionInfo versionInfo)
}
+ @ContainerTestVersionInfo.ContainerTest
+ public void testPutBlockForClosed(ContainerTestVersionInfo versionInfo)
+ throws Exception {
+ initTest(versionInfo);
+ KeyValueContainerData containerData = keyValueContainer.getContainerData();
+ assertEquals(0, containerData.getBlockCount());
+ keyValueContainer.close();
+ assertEquals(CLOSED, keyValueContainer.getContainerState());
+ // 1. Put Block with bcsId = 2, Overwrite = true
Review Comment:
```suggestion
// 1. Put Block with bcsId = 2, Overwrite BCS ID = true
```
##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java:
##########
@@ -196,6 +200,65 @@ public void testPutAndGetBlock(ContainerTestVersionInfo versionInfo)
}
+ @ContainerTestVersionInfo.ContainerTest
+ public void testPutBlockForClosed(ContainerTestVersionInfo versionInfo)
+ throws Exception {
+ initTest(versionInfo);
+ KeyValueContainerData containerData = keyValueContainer.getContainerData();
+ assertEquals(0, containerData.getBlockCount());
+ keyValueContainer.close();
+ assertEquals(CLOSED, keyValueContainer.getContainerState());
+ // 1. Put Block with bcsId = 2, Overwrite = true
+ blockManager.putBlockForClosedContainer(keyValueContainer, blockData1, true);
+
+ try (DBHandle db = BlockUtils.getDB(containerData, config)) {
+ BlockData fromGetBlockData;
+ //Check Container's bcsId
+ fromGetBlockData = blockManager.getBlock(keyValueContainer, blockData1.getBlockID());
+ assertEquals(1, containerData.getBlockCount());
+ assertEquals(1, containerData.getBlockCommitSequenceId());
+ assertEquals(1, fromGetBlockData.getBlockCommitSequenceId());
+ assertEquals(1, db.getStore().getMetadataTable().get(containerData.getBcsIdKey()));
+ assertEquals(1, db.getStore().getMetadataTable().get(containerData.getBlockCountKey()));
+
+ // 2. Put Block with bcsId = 2, Overwrite = false
+ BlockData blockData2 = createBlockData(1L, 3L, 1, 0, 2048, 2);
+ blockManager.putBlockForClosedContainer(keyValueContainer, blockData2, false);
+
+ // The block should be written, but we won't be able to read it, as the read fails
+ // when the block's BcsId exceeds the container's BcsId.
+ Assertions.assertThrows(StorageContainerException.class, () -> blockManager
+ .getBlock(keyValueContainer, blockData2.getBlockID()));
+ assertEquals(2, containerData.getBlockCount());
+ // BcsId should still be 1, as the BcsId is not overwritten
+ assertEquals(1, containerData.getBlockCommitSequenceId());
+ assertEquals(2, db.getStore().getMetadataTable().get(containerData.getBlockCountKey()));
+ assertEquals(1, db.getStore().getMetadataTable().get(containerData.getBcsIdKey()));
Review Comment:
We need a block level BCSID check here too.
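For example, something along these lines (just a sketch; it reads the raw entry from the block data table since `getBlock` rejects this block):
```java
// The stored block itself should still carry bcsId = 2 even though the
// container-level bcsId stayed at 1.
BlockData storedBlockData = db.getStore().getBlockDataTable()
    .get(containerData.getBlockKey(blockData2.getLocalID()));
assertEquals(2, storedBlockData.getBlockCommitSequenceId());
```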
##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java:
##########
@@ -225,37 +225,70 @@ public void testPutBlockForClosedContainer() throws IOException {
containerSet.addContainer(kvContainer);
KeyValueHandler keyValueHandler = createKeyValueHandler(containerSet);
List<ContainerProtos.ChunkInfo> chunkInfoList = new ArrayList<>();
- chunkInfoList.add(getChunkInfo().getProtoBufMessage());
+ ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", getBlockID().getLocalID(), 0), 0L, 20L);
+
+ chunkInfoList.add(info.getProtoBufMessage());
BlockData putBlockData = new BlockData(getBlockID());
- keyValueHandler.putBlockForClosedContainer(chunkInfoList, kvContainer, putBlockData, 1L);
- Assertions.assertEquals(containerData.getBlockCommitSequenceId(), 1L);
- Assertions.assertEquals(containerData.getBlockCount(), 1L);
+ putBlockData.setChunks(chunkInfoList);
+
+ ChunkBuffer chunkData = ContainerTestHelper.getData(20);
+ keyValueHandler.writeChunkForClosedContainer(info, getBlockID(), chunkData, kvContainer);
+ keyValueHandler.putBlockForClosedContainer(kvContainer, putBlockData, 1L, true);
+ assertEquals(1L, containerData.getBlockCommitSequenceId());
+ assertEquals(1L, containerData.getBlockCount());
try (DBHandle dbHandle = BlockUtils.getDB(containerData, new OzoneConfiguration())) {
long localID = putBlockData.getLocalID();
BlockData getBlockData = dbHandle.getStore().getBlockDataTable()
.get(containerData.getBlockKey(localID));
Assertions.assertTrue(blockDataEquals(putBlockData, getBlockData));
+ // Overwriting the same
+ assertEquals(20L, containerData.getBytesUsed());
+ assertEquals(20L, dbHandle.getStore().getMetadataTable().get(containerData.getBytesUsedKey()));
}
// Add another chunk and check the put block data
- ChunkInfo newChunkInfo = new ChunkInfo(String.format("%d.data.%d", getBlockID()
- .getLocalID(), 1L), 0, 20L);
+ ChunkInfo newChunkInfo = new ChunkInfo(String.format("%d.data.%d", getBlockID().getLocalID(), 1L), 20L, 20L);
+ chunkInfoList.add(newChunkInfo.getProtoBufMessage());
+ putBlockData.setChunks(chunkInfoList);
+
+ chunkData = ContainerTestHelper.getData(20);
+ keyValueHandler.writeChunkForClosedContainer(newChunkInfo, getBlockID(), chunkData, kvContainer);
+ keyValueHandler.putBlockForClosedContainer(kvContainer, putBlockData, 2L, true);
+ assertEquals(2L, containerData.getBlockCommitSequenceId());
+ assertEquals(1L, containerData.getBlockCount());
+
+ try (DBHandle dbHandle = BlockUtils.getDB(containerData, new OzoneConfiguration())) {
+ long localID = putBlockData.getLocalID();
+ BlockData getBlockData = dbHandle.getStore().getBlockDataTable()
+ .get(containerData.getBlockKey(localID));
+ Assertions.assertTrue(blockDataEquals(putBlockData, getBlockData));
+ assertEquals(40L, containerData.getBytesUsed());
Review Comment:
For clarity, let's only check DB data in this try block and move the
in-memory checks before it. It's a little inconsistent between the sections of
this method right now.
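Roughly this shape (a sketch of the suggested ordering, reusing the assertions already in the diff):
```java
// In-memory container state first, outside the DB handle...
assertEquals(2L, containerData.getBlockCommitSequenceId());
assertEquals(1L, containerData.getBlockCount());
assertEquals(40L, containerData.getBytesUsed());

// ...then only DB-backed state inside the try-with-resources block.
try (DBHandle dbHandle = BlockUtils.getDB(containerData, new OzoneConfiguration())) {
  BlockData getBlockData = dbHandle.getStore().getBlockDataTable()
      .get(containerData.getBlockKey(putBlockData.getLocalID()));
  Assertions.assertTrue(blockDataEquals(putBlockData, getBlockData));
  assertEquals(40L, dbHandle.getStore().getMetadataTable().get(containerData.getBytesUsedKey()));
}
```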
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java:
##########
@@ -98,6 +98,81 @@ public long putBlock(Container container, BlockData data,
data, endOfBlock);
}
+ @Override
+ public long putBlockForClosedContainer(Container container, BlockData data, boolean overwriteBcsId)
+ throws IOException {
+ Preconditions.checkNotNull(data, "BlockData cannot be null for put " +
+ "operation.");
+ Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
+ "cannot be negative");
+
+ KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData();
+
+ // We are not locking the key manager since RocksDB serializes all actions
+ // against a single DB. We rely on DB level locking to avoid conflicts.
+ try (DBHandle db = BlockUtils.getDB(containerData, config)) {
+ // This is a post condition that acts as a hint to the user.
+ // Should never fail.
+ Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
+
+ long bcsId = data.getBlockCommitSequenceId();
+ long containerBCSId = containerData.getBlockCommitSequenceId();
Review Comment:
nit. camel case
```suggestion
long containerBcsID = containerData.getBlockCommitSequenceId(); // or containerBcsId
```
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java:
##########
@@ -98,6 +98,81 @@ public long putBlock(Container container, BlockData data,
data, endOfBlock);
}
+ @Override
+ public long putBlockForClosedContainer(Container container, BlockData data, boolean overwriteBcsId)
+ throws IOException {
+ Preconditions.checkNotNull(data, "BlockData cannot be null for put " +
+ "operation.");
+ Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
+ "cannot be negative");
+
+ KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData();
+
+ // We are not locking the key manager since RocksDB serializes all actions
+ // against a single DB. We rely on DB level locking to avoid conflicts.
+ try (DBHandle db = BlockUtils.getDB(containerData, config)) {
+ // This is a post condition that acts as a hint to the user.
+ // Should never fail.
+ Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
+
+ long bcsId = data.getBlockCommitSequenceId();
+ long containerBCSId = containerData.getBlockCommitSequenceId();
Review Comment:
Our capitalizations of BCS and ID in variable names are not consistent in
the code in general, but since this method is new code the local variables
should be consistent with each other.
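For example (a sketch combining the two suggestions above; either capitalization works as long as both locals match):
```java
long blockBcsId = data.getBlockCommitSequenceId();
long containerBcsId = containerData.getBlockCommitSequenceId();
```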
##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java:
##########
@@ -196,6 +200,65 @@ public void testPutAndGetBlock(ContainerTestVersionInfo versionInfo)
}
+ @ContainerTestVersionInfo.ContainerTest
+ public void testPutBlockForClosed(ContainerTestVersionInfo versionInfo)
+ throws Exception {
+ initTest(versionInfo);
+ KeyValueContainerData containerData = keyValueContainer.getContainerData();
+ assertEquals(0, containerData.getBlockCount());
+ keyValueContainer.close();
+ assertEquals(CLOSED, keyValueContainer.getContainerState());
+ // 1. Put Block with bcsId = 2, Overwrite = true
+ blockManager.putBlockForClosedContainer(keyValueContainer, blockData1, true);
+
+ try (DBHandle db = BlockUtils.getDB(containerData, config)) {
+ BlockData fromGetBlockData;
+ //Check Container's bcsId
+ fromGetBlockData = blockManager.getBlock(keyValueContainer, blockData1.getBlockID());
+ assertEquals(1, containerData.getBlockCount());
+ assertEquals(1, containerData.getBlockCommitSequenceId());
+ assertEquals(1, fromGetBlockData.getBlockCommitSequenceId());
+ assertEquals(1, db.getStore().getMetadataTable().get(containerData.getBcsIdKey()));
+ assertEquals(1, db.getStore().getMetadataTable().get(containerData.getBlockCountKey()));
+
+ // 2. Put Block with bcsId = 2, Overwrite = false
+ BlockData blockData2 = createBlockData(1L, 3L, 1, 0, 2048, 2);
+ blockManager.putBlockForClosedContainer(keyValueContainer, blockData2, false);
+
+ // The block should be written, but we won't be able to read it, as the read fails
+ // when the block's BcsId exceeds the container's BcsId.
+ Assertions.assertThrows(StorageContainerException.class, () -> blockManager
+ .getBlock(keyValueContainer, blockData2.getBlockID()));
+ assertEquals(2, containerData.getBlockCount());
+ // BcsId should still be 1, as the BcsId is not overwritten
+ assertEquals(1, containerData.getBlockCommitSequenceId());
+ assertEquals(2, db.getStore().getMetadataTable().get(containerData.getBlockCountKey()));
+ assertEquals(1, db.getStore().getMetadataTable().get(containerData.getBcsIdKey()));
+
+ // 3. Put Block with bcsId = 2, Overwrite = true
+ // This should succeed as we are overwriting the BcsId. The container BcsId should be updated to 2.
+ // The block count should not change.
+ blockManager.putBlockForClosedContainer(keyValueContainer, blockData2, true);
+ fromGetBlockData = blockManager.getBlock(keyValueContainer, blockData2.getBlockID());
+ assertEquals(2, containerData.getBlockCount());
+ assertEquals(2, containerData.getBlockCommitSequenceId());
+ assertEquals(2, fromGetBlockData.getBlockCommitSequenceId());
+ assertEquals(2, db.getStore().getMetadataTable().get(containerData.getBlockCountKey()));
+ assertEquals(2, db.getStore().getMetadataTable().get(containerData.getBcsIdKey()));
+
+ // 4. Put Block with bcsId = 1 < container bcsId, Overwrite = true
+ // Container bcsId should not change
+ BlockData blockData3 = createBlockData(1L, 1L, 1, 0, 2048, 1);
Review Comment:
We should call out that this is designed to test the overwrite case where we
are doing put block for a block ID that already exists.
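Perhaps a comment along these lines (wording is only a sketch of the intent described above):
```java
// 4. Put Block with bcsId = 1 < container bcsId, Overwrite = true.
// This exercises the overwrite path: put block is issued for a block ID that
// already exists in the container. Container bcsId should not change.
```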
##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java:
##########
@@ -225,37 +225,70 @@ public void testPutBlockForClosedContainer() throws IOException {
containerSet.addContainer(kvContainer);
KeyValueHandler keyValueHandler = createKeyValueHandler(containerSet);
List<ContainerProtos.ChunkInfo> chunkInfoList = new ArrayList<>();
- chunkInfoList.add(getChunkInfo().getProtoBufMessage());
+ ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", getBlockID().getLocalID(), 0), 0L, 20L);
+
+ chunkInfoList.add(info.getProtoBufMessage());
BlockData putBlockData = new BlockData(getBlockID());
- keyValueHandler.putBlockForClosedContainer(chunkInfoList, kvContainer, putBlockData, 1L);
- Assertions.assertEquals(containerData.getBlockCommitSequenceId(), 1L);
- Assertions.assertEquals(containerData.getBlockCount(), 1L);
+ putBlockData.setChunks(chunkInfoList);
+
+ ChunkBuffer chunkData = ContainerTestHelper.getData(20);
+ keyValueHandler.writeChunkForClosedContainer(info, getBlockID(), chunkData, kvContainer);
+ keyValueHandler.putBlockForClosedContainer(kvContainer, putBlockData, 1L, true);
+ assertEquals(1L, containerData.getBlockCommitSequenceId());
+ assertEquals(1L, containerData.getBlockCount());
try (DBHandle dbHandle = BlockUtils.getDB(containerData, new OzoneConfiguration())) {
long localID = putBlockData.getLocalID();
BlockData getBlockData = dbHandle.getStore().getBlockDataTable()
.get(containerData.getBlockKey(localID));
Assertions.assertTrue(blockDataEquals(putBlockData, getBlockData));
+ // Overwriting the same
+ assertEquals(20L, containerData.getBytesUsed());
+ assertEquals(20L, dbHandle.getStore().getMetadataTable().get(containerData.getBytesUsedKey()));
}
// Add another chunk and check the put block data
- ChunkInfo newChunkInfo = new ChunkInfo(String.format("%d.data.%d", getBlockID()
- .getLocalID(), 1L), 0, 20L);
+ ChunkInfo newChunkInfo = new ChunkInfo(String.format("%d.data.%d", getBlockID().getLocalID(), 1L), 20L, 20L);
+ chunkInfoList.add(newChunkInfo.getProtoBufMessage());
+ putBlockData.setChunks(chunkInfoList);
+
+ chunkData = ContainerTestHelper.getData(20);
+ keyValueHandler.writeChunkForClosedContainer(newChunkInfo, getBlockID(), chunkData, kvContainer);
+ keyValueHandler.putBlockForClosedContainer(kvContainer, putBlockData, 2L, true);
+ assertEquals(2L, containerData.getBlockCommitSequenceId());
+ assertEquals(1L, containerData.getBlockCount());
+
+ try (DBHandle dbHandle = BlockUtils.getDB(containerData, new OzoneConfiguration())) {
+ long localID = putBlockData.getLocalID();
+ BlockData getBlockData = dbHandle.getStore().getBlockDataTable()
+ .get(containerData.getBlockKey(localID));
+ Assertions.assertTrue(blockDataEquals(putBlockData, getBlockData));
+ assertEquals(40L, containerData.getBytesUsed());
+ assertEquals(40L, dbHandle.getStore().getMetadataTable().get(containerData.getBytesUsedKey()));
+ }
+
+ newChunkInfo = new ChunkInfo(String.format("%d.data.%d", getBlockID().getLocalID(), 1L), 20L, 30L);
Review Comment:
Can you add a comment like in the other sections, describing what we are aiming to test in this last case?
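Something like this, for example (a sketch; the intent is inferred from the chunk offset/length values in the diff):
```java
// Overwrite the last chunk (offset 20) with a larger 30-byte chunk and verify
// that bytesUsed and the DB metadata reflect the larger chunk length.
```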
##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java:
##########
@@ -196,6 +200,65 @@ public void testPutAndGetBlock(ContainerTestVersionInfo versionInfo)
}
+ @ContainerTestVersionInfo.ContainerTest
+ public void testPutBlockForClosed(ContainerTestVersionInfo versionInfo)
+ throws Exception {
+ initTest(versionInfo);
+ KeyValueContainerData containerData = keyValueContainer.getContainerData();
+ assertEquals(0, containerData.getBlockCount());
+ keyValueContainer.close();
+ assertEquals(CLOSED, keyValueContainer.getContainerState());
+ // 1. Put Block with bcsId = 2, Overwrite = true
+ blockManager.putBlockForClosedContainer(keyValueContainer, blockData1, true);
+
+ try (DBHandle db = BlockUtils.getDB(containerData, config)) {
+ BlockData fromGetBlockData;
+ //Check Container's bcsId
+ fromGetBlockData = blockManager.getBlock(keyValueContainer, blockData1.getBlockID());
+ assertEquals(1, containerData.getBlockCount());
+ assertEquals(1, containerData.getBlockCommitSequenceId());
+ assertEquals(1, fromGetBlockData.getBlockCommitSequenceId());
+ assertEquals(1, db.getStore().getMetadataTable().get(containerData.getBcsIdKey()));
+ assertEquals(1, db.getStore().getMetadataTable().get(containerData.getBlockCountKey()));
+
+ // 2. Put Block with bcsId = 2, Overwrite = false
+ BlockData blockData2 = createBlockData(1L, 3L, 1, 0, 2048, 2);
+ blockManager.putBlockForClosedContainer(keyValueContainer, blockData2, false);
+
+ // The block should be written, but we won't be able to read it, as the read fails
+ // when the block's BcsId exceeds the container's BcsId.
+ Assertions.assertThrows(StorageContainerException.class, () -> blockManager
+ .getBlock(keyValueContainer, blockData2.getBlockID()));
+ assertEquals(2, containerData.getBlockCount());
+ // BcsId should still be 1, as the BcsId is not overwritten
+ assertEquals(1, containerData.getBlockCommitSequenceId());
+ assertEquals(2, db.getStore().getMetadataTable().get(containerData.getBlockCountKey()));
+ assertEquals(1, db.getStore().getMetadataTable().get(containerData.getBcsIdKey()));
+
+ // 3. Put Block with bcsId = 2, Overwrite = true
+ // This should succeed as we are overwriting the BcsId. The container BcsId should be updated to 2.
+ // The block count should not change.
+ blockManager.putBlockForClosedContainer(keyValueContainer, blockData2, true);
+ fromGetBlockData = blockManager.getBlock(keyValueContainer, blockData2.getBlockID());
+ assertEquals(2, containerData.getBlockCount());
+ assertEquals(2, containerData.getBlockCommitSequenceId());
+ assertEquals(2, fromGetBlockData.getBlockCommitSequenceId());
+ assertEquals(2, db.getStore().getMetadataTable().get(containerData.getBlockCountKey()));
+ assertEquals(2, db.getStore().getMetadataTable().get(containerData.getBcsIdKey()));
+
+ // 4. Put Block with bcsId = 1 < container bcsId, Overwrite = true
+ // Container bcsId should not change
+ BlockData blockData3 = createBlockData(1L, 1L, 1, 0, 2048, 1);
+ blockManager.putBlockForClosedContainer(keyValueContainer, blockData3, true);
+ fromGetBlockData = blockManager.getBlock(keyValueContainer, blockData3.getBlockID());
+ assertEquals(3, containerData.getBlockCount());
+ assertEquals(2, containerData.getBlockCommitSequenceId());
+ assertEquals(1, fromGetBlockData.getBlockCommitSequenceId());
+ assertEquals(3, db.getStore().getMetadataTable().get(containerData.getBlockCountKey()));
+ assertEquals(2, db.getStore().getMetadataTable().get(containerData.getBcsIdKey()));
Review Comment:
If used bytes is not expected to change we should test that too.
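Something like this could cover it (a sketch; it captures the value before the step-4 put and asserts it is unchanged afterwards):
```java
long bytesUsedBefore = containerData.getBytesUsed();
blockManager.putBlockForClosedContainer(keyValueContainer, blockData3, true);
// A metadata-only put should leave used bytes untouched.
assertEquals(bytesUsedBefore, containerData.getBytesUsed());
assertEquals(bytesUsedBefore,
    db.getStore().getMetadataTable().get(containerData.getBytesUsedKey()));
```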
##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java:
##########
@@ -196,6 +200,65 @@ public void testPutAndGetBlock(ContainerTestVersionInfo versionInfo)
}
+ @ContainerTestVersionInfo.ContainerTest
+ public void testPutBlockForClosed(ContainerTestVersionInfo versionInfo)
+ throws Exception {
+ initTest(versionInfo);
+ KeyValueContainerData containerData = keyValueContainer.getContainerData();
+ assertEquals(0, containerData.getBlockCount());
+ keyValueContainer.close();
+ assertEquals(CLOSED, keyValueContainer.getContainerState());
+ // 1. Put Block with bcsId = 2, Overwrite = true
Review Comment:
The same comment applies lower down as well, to reflect that this is not a general overwrite.
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java:
##########
@@ -98,6 +98,81 @@ public long putBlock(Container container, BlockData data,
data, endOfBlock);
}
+ @Override
+ public long putBlockForClosedContainer(Container container, BlockData data, boolean overwriteBcsId)
+ throws IOException {
+ Preconditions.checkNotNull(data, "BlockData cannot be null for put " +
+ "operation.");
+ Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
+ "cannot be negative");
+
+ KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData();
+
+ // We are not locking the key manager since RocksDB serializes all actions
+ // against a single DB. We rely on DB level locking to avoid conflicts.
+ try (DBHandle db = BlockUtils.getDB(containerData, config)) {
+ // This is a post condition that acts as a hint to the user.
+ // Should never fail.
+ Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
+
+ long bcsId = data.getBlockCommitSequenceId();
Review Comment:
This helps distinguish it from `containerBcsID` when compared below
```suggestion
long blockBcsID = data.getBlockCommitSequenceId();
```
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java:
##########
@@ -174,8 +175,25 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
ChunkUtils.validateChunkSize(channel, info, chunkFile.getName());
}
- ChunkUtils
- .writeData(channel, chunkFile.getName(), data, offset, len, volume);
+ long fileLengthBeforeWrite;
+ try {
+ fileLengthBeforeWrite = channel.size();
+ } catch (IOException e) {
+ throw new StorageContainerException("IO error encountered while " +
+ "getting the file size for " + chunkFile.getName(),
CHUNK_FILE_INCONSISTENCY);
+ }
+
+ ChunkUtils.writeData(channel, chunkFile.getName(), data, offset, len, volume);
+
+ // When overwriting, update the bytes used if the new length is greater than the old length.
+ // This is to ensure that the bytes used is updated correctly when overwriting a smaller chunk
+ // with a larger chunk.
Review Comment:
```suggestion
// This is to ensure that the bytes used is updated correctly when overwriting a smaller chunk
// with a larger chunk at the end of the block.
```
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java:
##########
@@ -174,8 +175,25 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
ChunkUtils.validateChunkSize(channel, info, chunkFile.getName());
}
- ChunkUtils
- .writeData(channel, chunkFile.getName(), data, offset, len, volume);
+ long fileLengthBeforeWrite;
+ try {
+ fileLengthBeforeWrite = channel.size();
+ } catch (IOException e) {
+ throw new StorageContainerException("IO error encountered while " +
+ "getting the file size for " + chunkFile.getName(),
CHUNK_FILE_INCONSISTENCY);
+ }
+
+ ChunkUtils.writeData(channel, chunkFile.getName(), data, offset, len, volume);
+
+ // When overwriting, update the bytes used if the new length is greater than the old length.
+ // This is to ensure that the bytes used is updated correctly when overwriting a smaller chunk
+ // with a larger chunk.
Review Comment:
Do we have a safeguard to fail the write if we are overwriting a chunk that
is not the last one and exceeding the existing length?
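If such a guard is added, one possible shape (purely a sketch; `isLastChunkOfBlock` is a hypothetical flag the caller would need to supply, not an existing parameter, and the exception/result code simply mirror the ones used above):
```java
// Hypothetical safeguard: only the block's final chunk may grow the file past
// its previous length; an overwrite of an earlier chunk that runs past the
// current end of file is rejected instead of silently clobbering later data.
if (!isLastChunkOfBlock && offset < fileLengthBeforeWrite
    && offset + len > fileLengthBeforeWrite) {
  throw new StorageContainerException("Overwrite of a non-final chunk in "
      + chunkFile.getName() + " would exceed the existing file length "
      + fileLengthBeforeWrite, CHUNK_FILE_INCONSISTENCY);
}
```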
##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java:
##########
@@ -98,6 +98,81 @@ public long putBlock(Container container, BlockData data,
data, endOfBlock);
}
+ @Override
+ public long putBlockForClosedContainer(Container container, BlockData data, boolean overwriteBcsId)
Review Comment:
This method needs a pretty substantial javadoc covering its intended uses
and what is supposed to be set in the block and container data by the caller.
For example, block overwrite is automatically handled, and caller is supposed
to set used bytes values.
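As a starting point, something along these lines (only a sketch of the content it could cover, based on the behaviour visible in this PR; wording is a placeholder):
```java
/**
 * Persists block metadata for a container that is already CLOSED, bypassing the
 * normal open-container put path.
 *
 * <p>The caller must have already written the chunk data and is responsible for
 * updating the container's used-bytes accounting; this method only updates the
 * block data table, the block count and, optionally, the container BCS ID. If an
 * entry for the same block ID already exists, it is overwritten.
 *
 * @param container the closed container to update
 * @param data block metadata to persist, with its BCS ID already set
 * @param overwriteBcsId if true, advance the container BCS ID to the block's
 *        BCS ID when the block's value is higher than the container's
 */
```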
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]