This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new 4dd97522d9fe HDFS-17024. Potential data race introduced by HDFS-15865 (#6223)
4dd97522d9fe is described below

commit 4dd97522d9fecb9ce985689ba46cc25832c2b963
Author: Hiroaki Segawa <g...@stonedot.com>
AuthorDate: Fri Oct 27 14:25:00 2023 +0900

    HDFS-17024. Potential data race introduced by HDFS-15865 (#6223)
    
    (cherry picked from commit 93a3c6e2cd4db4b395b3ec00a513dd3aceb3e306)
---
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java     | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 358a485900d5..ccc2dfe20688 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -475,6 +475,7 @@ class DataStreamer extends Daemon {
   private DataOutputStream blockStream;
   private DataInputStream blockReplyStream;
   private ResponseProcessor response = null;
+  private final Object nodesLock = new Object();
+  private volatile DatanodeInfo[] nodes = null; // list of targets for current block
   private volatile StorageType[] storageTypes = null;
   private volatile String[] storageIDs = null;
@@ -613,7 +614,9 @@ class DataStreamer extends Daemon {
 
   private void setPipeline(DatanodeInfo[] nodes, StorageType[] storageTypes,
                            String[] storageIDs) {
-    this.nodes = nodes;
+    synchronized (nodesLock) {
+      this.nodes = nodes;
+    }
     this.storageTypes = storageTypes;
     this.storageIDs = storageIDs;
   }
@@ -910,7 +913,10 @@ class DataStreamer extends Daemon {
     try (TraceScope ignored = dfsClient.getTracer().
         newScope("waitForAckedSeqno")) {
       LOG.debug("{} waiting for ack for: {}", this, seqno);
-      int dnodes = nodes != null ? nodes.length : 3;
+      int dnodes;
+      synchronized (nodesLock) {
+        dnodes = nodes != null ? nodes.length : 3;
+      }
       int writeTimeout = dfsClient.getDatanodeWriteTimeout(dnodes);
       long begin = Time.monotonicNow();
       try {
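
For readers skimming the patch: the write in setPipeline and the check-then-act read in waitForAckedSeqno now take the same nodesLock, so the nodes reference cannot be replaced between the null check and the length dereference. Below is a minimal standalone sketch of that locking pattern; the class and field names are simplified illustrations, not the actual DataStreamer code.

class PipelineHolder {
  private final Object nodesLock = new Object();
  private volatile String[] nodes = null; // stands in for DatanodeInfo[]

  void setPipeline(String[] newNodes) {
    // writes take the same lock as compound reads
    synchronized (nodesLock) {
      this.nodes = newNodes;
    }
  }

  int pipelineLength(int fallback) {
    synchronized (nodesLock) {
      // nodes cannot change between the null check and the .length read
      return nodes != null ? nodes.length : fallback;
    }
  }
}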

