This is an automated email from the ASF dual-hosted git repository.
sumitagrawal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new f3cd59a89c0 HDDS-14183. Attempted to decrement available space to a negative value (#9655)
f3cd59a89c0 is described below
commit f3cd59a89c0ac161f7f609d047f0f0a9da8c0887
Author: Sarveksha Yeshavantha Raju <[email protected]>
AuthorDate: Thu Mar 26 16:47:30 2026 +0530
HDDS-14183. Attempted to decrement available space to a negative value (#9655)
---
.../ozone/container/common/impl/ContainerData.java | 37 +++++++++++++++-
.../keyvalue/impl/FilePerBlockStrategy.java | 27 ++++++++++--
.../keyvalue/impl/TestFilePerBlockStrategy.java | 51 ++++++++++++++++++++++
3 files changed, 109 insertions(+), 6 deletions(-)
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index c334a2d842e..31b890fdbd3 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -402,7 +402,7 @@ public Statistics getStatistics() {
* Also decrement committed bytes against the bytes written.
* @param bytes the number of bytes write into the container.
*/
- private void incrWriteBytes(long bytes) {
+ public void incrWriteBytes(long bytes) {
/*
Increase the cached Used Space in VolumeInfo as it
maybe not updated, DU or DedicatedDiskSpaceUsage runs
@@ -569,9 +569,34 @@ public boolean needsDataChecksum() {
*/
public abstract long getBlockCommitSequenceId();
+ /**
+ * Update write statistics for chunk operations.
+ * <p>
+ * This method handles two distinct cases:
+ * 1. New writes (overwrite=false):
+ * - Updates I/O metrics: writeBytes, writeCount
+ * - Updates disk metrics: blockBytes
+ * - Updates space metrics: usedSpace, committedBytes (via incrWriteBytes)
+ * 2. Overwrites (overwrite=true):
+ * - Updates I/O metrics only: writeBytes, writeCount
+ * - Does NOT update space metrics (file may not grow)
+ * - blockBytes is handled separately for file growth
+ * <p>
+ * Example for overwrite with growth (file 4 bytes, overwrite 6 bytes at offset 2):
+ * - bytesWritten=6, overwrite=true
+ * - writeBytes += 6 (I/O operation size)
+ * - writeCount += 1 (one operation)
+ * - usedSpace/committedBytes NOT updated here (delta handled separately)
+ * - blockBytes NOT updated here (delta=4 handled by incrementBlockBytes)
+ *
+ * @param bytesWritten Number of bytes in the I/O operation
+ * @param overwrite Whether this is an overwrite operation
+ */
public void updateWriteStats(long bytesWritten, boolean overwrite) {
getStatistics().updateWrite(bytesWritten, overwrite);
- incrWriteBytes(bytesWritten);
+ if (!overwrite) {
+ incrWriteBytes(bytesWritten);
+ }
}
@Override
@@ -672,6 +697,14 @@ public synchronized void updateWrite(long length, boolean
overwrite) {
writeBytes += length;
}
+ /**
+ * Increment blockBytes by the given delta.
+ * This is used for overwrite operations that extend the file.
+ */
+ public synchronized void incrementBlockBytes(long delta) {
+ blockBytes += delta;
+ }
+
public synchronized void decDeletion(long deletedBytes, long processedBytes, long deletedBlockCount, long processedBlockCount) {
blockBytes -= deletedBytes;
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
index 36ebdc5aa82..9a13507d6b3 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
@@ -182,16 +182,35 @@ public void writeChunk(Container container, BlockID
blockID, ChunkInfo info,
ChunkUtils.writeData(channel, chunkFile.getName(), data, offset,
chunkLength, volume);
- // When overwriting, update the bytes used if the new length is greater
than the old length
- // This is to ensure that the bytes used is updated correctly when
overwriting a smaller chunk
- // with a larger chunk at the end of the block.
+ // Handle space accounting for overwrites that extend the file length.
+ // For overwrites, we must distinguish between:
+ // 1. Pure overwrites (no file growth): No space consumed, only I/O
metrics updated
+ // 2. Overwrites with growth (file extends): Only the delta consumes new
space
+ //
+ // Example: File is 4 bytes. Overwrite 6 bytes at offset 2.
+ // - I/O operation: 6 bytes written (tracked by updateWriteStats below)
+ // - Disk growth: 4 bytes (file grows from 4 -> 8, delta = 4)
+ // - Space consumed: 4 bytes (only the delta, not the full 6 bytes)
+ //
+ // We handle the delta BEFORE calling updateWriteStats to ensure correct
accounting:
+ // - incrementBlockBytes(delta): Updates blockBytes for disk space growth
+ // - incrWriteBytes(delta): Updates usedSpace/committedBytes for space
consumed
+ // - updateWriteStats(chunkLength, true): Updates I/O metrics (writeBytes,
writeCount)
+ // but skips space updates because overwrite=true
if (overwrite) {
long fileLengthAfterWrite = offset + chunkLength;
if (fileLengthAfterWrite > fileLengthBeforeWrite) {
- containerData.getStatistics().updateWrite(fileLengthAfterWrite - fileLengthBeforeWrite, false);
+ long delta = fileLengthAfterWrite - fileLengthBeforeWrite;
+ // Update disk space accounting for the file growth (delta only)
+ containerData.getStatistics().incrementBlockBytes(delta);
+ // Update volume space accounting for the new space consumed (delta
only)
+ containerData.incrWriteBytes(delta);
}
}
+ // Update I/O metrics (writeBytes, writeCount) and space metrics for new
writes.
+ // For overwrites (overwrite=true), this only updates I/O metrics.
+ // For new writes (overwrite=false), this updates both I/O and space
metrics.
containerData.updateWriteStats(chunkLength, overwrite);
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
index 95475651d01..364ddad2cfd 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
@@ -229,6 +229,57 @@ public void testWriteChunkForClosedContainer()
Assertions.assertEquals(containerData.getBytesUsed(),
writeChunkData.remaining() + newWriteChunkData.remaining());
}
+ /**
+ * Test that overwrite operations that extend the file correctly update
usedSpace by the delta.
+ */
+ @Test
+ public void testOverwriteFileExtensionUpdatesByDelta() throws Exception {
+ KeyValueContainer kvContainer = getKeyValueContainer();
+ KeyValueContainerData containerData = kvContainer.getContainerData();
+ ChunkManager chunkManager = createTestSubject();
+
+ // Initial write: 4 bytes at offset 0
+ byte[] initialData = "test".getBytes(UTF_8);
+ ChunkInfo initialChunk = new ChunkInfo(String.format("%d.data.%d",
getBlockID().getLocalID(), 0),
+ 0, // offset
+ initialData.length);
+ ChunkBuffer initialBuffer =
ChunkBuffer.allocate(initialData.length).put(initialData);
+ initialBuffer.rewind();
+ setDataChecksum(initialChunk, initialBuffer);
+
+ long initialUsedSpace =
containerData.getVolume().getCurrentUsage().getUsedSpace();
+ long initialBlockBytes = containerData.getBytesUsed();
+ chunkManager.writeChunk(kvContainer, getBlockID(), initialChunk,
initialBuffer, WRITE_STAGE);
+ long afterFirstWriteUsedSpace =
containerData.getVolume().getCurrentUsage().getUsedSpace();
+ long afterFirstWriteBlockBytes = containerData.getBytesUsed();
+
+ assertEquals(initialUsedSpace + initialData.length,
afterFirstWriteUsedSpace);
+ assertEquals(initialBlockBytes + initialData.length,
afterFirstWriteBlockBytes);
+
+ // Overwrite that extends file: write 6 bytes at offset 2 (extends file
from 4 to 8 bytes)
+ // File before: [t][e][s][t]
+ // File after: [t][e][e][x][t][e][n][d]
+ // File length delta: 8 - 4 = 4 bytes
+ byte[] overwriteData = "extend".getBytes(UTF_8);
+ ChunkInfo overwriteChunk = new ChunkInfo(String.format("%d.data.%d",
getBlockID().getLocalID(), 0),
+ 2, // offset - starts at position 2
+ overwriteData.length);
+ ChunkBuffer overwriteBuffer =
ChunkBuffer.allocate(overwriteData.length).put(overwriteData);
+ overwriteBuffer.rewind();
+ setDataChecksum(overwriteChunk, overwriteBuffer);
+
+ chunkManager.writeChunk(kvContainer, getBlockID(), overwriteChunk,
overwriteBuffer, WRITE_STAGE);
+ long afterOverwriteUsedSpace =
containerData.getVolume().getCurrentUsage().getUsedSpace();
+ long afterOverwriteBlockBytes = containerData.getBytesUsed();
+
+ long expectedDelta = (2 + overwriteData.length) - initialData.length; // 8 - 4 = 4
+ long expectedWriteBytes = initialData.length + overwriteData.length; // 4 + 6 = 10
+
+ assertEquals(afterFirstWriteUsedSpace + expectedDelta,
afterOverwriteUsedSpace);
+ assertEquals(afterFirstWriteBlockBytes + expectedDelta,
afterOverwriteBlockBytes);
+ assertEquals(expectedWriteBytes,
containerData.getStatistics().getWriteBytes());
+ }
+
@Test
public void testPutBlockForClosedContainer() throws IOException {
OzoneConfiguration conf = new OzoneConfiguration();
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]