This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 5663971efe HDDS-11665. Minor optimizations on the write path (#7407)
5663971efe is described below
commit 5663971efe62fe7b50340a715bf2f3a49cdf47ba
Author: Duong Nguyen <[email protected]>
AuthorDate: Tue Nov 12 10:04:15 2024 -0800
HDDS-11665. Minor optimizations on the write path (#7407)
---
.../main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java | 2 +-
.../apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java | 4 +---
.../apache/hadoop/ozone/container/common/helpers/BlockData.java | 2 +-
.../java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java | 8 ++------
4 files changed, 5 insertions(+), 11 deletions(-)
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index b0ef85cfbf..979b1b9920 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -282,7 +282,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
// gets the minimum log index replicated to all servers
@Override
public long getReplicatedMinCommitIndex() {
- return commitInfoMap.values().parallelStream()
+ return commitInfoMap.values().stream()
.mapToLong(Long::longValue).min().orElse(0);
}
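
Note: the hunk above swaps parallelStream() for stream() when taking the minimum of the per-server commit indexes. The commitInfoMap has at most one entry per Ratis peer, so a sequential reduction avoids submitting work to the common fork/join pool on every call. A minimal, self-contained sketch of the sequential form follows; the map contents and class name are illustrative, not taken from Ozone.

    // Sketch: minimum commit index over a handful of per-server values.
    // For a map with only a few entries, a sequential stream avoids the
    // fork/join submission overhead of parallelStream().
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class MinCommitIndexSketch {
      public static void main(String[] args) {
        Map<String, Long> commitInfoMap = new ConcurrentHashMap<>();
        commitInfoMap.put("dn1", 120L);
        commitInfoMap.put("dn2", 118L);
        commitInfoMap.put("dn3", 121L);

        // Sequential min, mirroring the patched getReplicatedMinCommitIndex().
        long minIndex = commitInfoMap.values().stream()
            .mapToLong(Long::longValue).min().orElse(0);
        System.out.println("replicated min commit index = " + minIndex);
      }
    }
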
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java
index 36c16e92bf..782476eb56 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java
@@ -25,7 +25,6 @@ import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Objects;
-import java.util.UUID;
import java.util.function.Function;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
@@ -35,7 +34,6 @@ import org.apache.ratis.util.UncheckedAutoCloseable;
final class ChunkBufferImplWithByteBuffer implements ChunkBuffer {
private final ByteBuffer buffer;
private final UncheckedAutoCloseable underlying;
- private final UUID identity = UUID.randomUUID();
ChunkBufferImplWithByteBuffer(ByteBuffer buffer) {
this(buffer, null);
@@ -163,6 +161,6 @@ final class ChunkBufferImplWithByteBuffer implements ChunkBuffer {
@Override
public String toString() {
return getClass().getSimpleName() + ":limit=" + buffer.limit()
- + "@" + identity;
+ + "@" + Integer.toHexString(super.hashCode());
}
}
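
Note: the removed UUID field existed only to distinguish buffer instances in toString() output, and generating a random UUID for every ChunkBufferImplWithByteBuffer allocation adds avoidable cost on the write path. Below is a stripped-down stand-in (not the real class) showing the same toString() pattern based on the object's identity hash.

    // Sketch: tell buffer instances apart in log output via the identity
    // hash instead of allocating a random UUID per instance.
    import java.nio.ByteBuffer;

    public class ChunkBufferToStringSketch {
      static final class Chunk {
        private final ByteBuffer buffer;

        Chunk(ByteBuffer buffer) {
          this.buffer = buffer;
        }

        @Override
        public String toString() {
          // super.hashCode() invokes Object.hashCode(), the identity hash:
          // free to obtain and stable for the object's lifetime, unlike
          // UUID.randomUUID() at construction time.
          return getClass().getSimpleName() + ":limit=" + buffer.limit()
              + "@" + Integer.toHexString(super.hashCode());
        }
      }

      public static void main(String[] args) {
        System.out.println(new Chunk(ByteBuffer.allocate(16)));
      }
    }
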
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
index b3ee599173..4fee39921b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
@@ -254,7 +254,7 @@ public class BlockData {
size = singleChunk.getLen();
} else {
chunkList = chunks;
- size = chunks.parallelStream()
+ size = chunks.stream()
.mapToLong(ContainerProtos.ChunkInfo::getLen)
.sum();
}
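
Note: same rationale as the XceiverClientRatis change above: a block's chunk list is short, so summing lengths sequentially is cheaper than a parallel stream. A brief sketch, with plain long values standing in for ContainerProtos.ChunkInfo:

    // Sketch: sequential sum of chunk lengths for a short chunk list.
    import java.util.List;

    public class ChunkSizeSumSketch {
      public static void main(String[] args) {
        List<Long> chunkLengths = List.of(4L << 20, 4L << 20, 1L << 20);
        long size = chunkLengths.stream().mapToLong(Long::longValue).sum();
        System.out.println("block size = " + size);
      }
    }
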
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 4f9e5db49a..b5f8191d36 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -303,7 +303,7 @@ public class KeyOutputStream extends OutputStream
if (retry) {
current.writeOnRetry(len);
} else {
- waitForRetryHandling(current);
+ current.waitForRetryHandling(retryHandlingCondition);
current.write(b, off, writeLen);
offset += writeLen;
}
@@ -584,7 +584,7 @@ public class KeyOutputStream extends OutputStream
blockOutputStreamEntryPool.getCurrentStreamEntry();
if (entry != null) {
// If the current block is to handle retries, wait until all the retries are done.
- waitForRetryHandling(entry);
+ doInWriteLock(() -> entry.waitForRetryHandling(retryHandlingCondition));
entry.registerCallReceived();
try {
handleStreamAction(entry, op);
@@ -608,10 +608,6 @@ public class KeyOutputStream extends OutputStream
}
}
- private void waitForRetryHandling(BlockOutputStreamEntry currentEntry) throws InterruptedException {
- doInWriteLock(() -> currentEntry.waitForRetryHandling(retryHandlingCondition));
- }
-
private void handleStreamAction(BlockOutputStreamEntry entry,
StreamAction op) throws IOException {
Collection<DatanodeDetails> failedServers = entry.getFailedServers();
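
Note: the KeyOutputStream hunks drop the private waitForRetryHandling(BlockOutputStreamEntry) helper. The write path (the @@ -303 hunk) now waits on the current entry directly without the doInWriteLock wrapper, while the other call site inlines the wrapped call. The sketch below illustrates the general condition-wait pattern involved; the class and field names loosely mirror the diff but are illustrative stand-ins, not the real KeyOutputStream API, and the reading that the extra lock wrapper is redundant where the caller already holds the lock is an inference from the diff, not stated in the commit.

    // Sketch: await a Condition only while its lock is held, and skip the
    // extra lock/unlock helper at call sites that already hold the lock.
    import java.util.concurrent.locks.Condition;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class RetryHandlingWaitSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final Condition retryHandlingCondition =
          lock.writeLock().newCondition();
      private volatile boolean handlingRetry = false;

      // Call site that does NOT already hold the lock: acquire it around the wait.
      void waitForRetryHandlingLocked() throws InterruptedException {
        lock.writeLock().lock();
        try {
          waitForRetryHandling();
        } finally {
          lock.writeLock().unlock();
        }
      }

      // Call site that already holds the write lock: wait directly, no re-lock.
      void waitForRetryHandling() throws InterruptedException {
        while (handlingRetry) {
          retryHandlingCondition.await();
        }
      }

      void finishRetryHandling() {
        lock.writeLock().lock();
        try {
          handlingRetry = false;
          retryHandlingCondition.signalAll();
        } finally {
          lock.writeLock().unlock();
        }
      }

      public static void main(String[] args) throws InterruptedException {
        RetryHandlingWaitSketch sketch = new RetryHandlingWaitSketch();
        // No retry in flight, so this returns immediately.
        sketch.waitForRetryHandlingLocked();
        System.out.println("no retry handling in progress, write may proceed");
      }
    }
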
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]