Author: hairong
Date: Fri Mar 13 19:00:48 2009
New Revision: 753346

URL: http://svn.apache.org/viewvc?rev=753346&view=rev
Log:
HADOOP-5465. Blocks remain under-replicated. Contributed by Hairong Kuang.

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=753346&r1=753345&r2=753346&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Fri Mar 13 19:00:48 2009
@@ -2119,6 +2119,10 @@
     HADOOP-5412. Simulated DataNode should not write to a block that's being
     written by another thread. (hairong)
 
+    HADOOP-5465. Fix the problem of blocks remaining under-replicated by
+    providing synchronized modification to the counter xmitsInProgress in
+    DataNode. (hairong)
+
 Release 0.18.3 - 2009-01-27
 
   IMPROVEMENTS
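
For context, the race being fixed is a lost update on an unsynchronized int:
xmitsInProgress++ is a read-modify-write, so two concurrent DataTransfer
threads can read the same value and one increment (or decrement) is silently
lost, skewing the transfer count the NameNode sees in heartbeats and leaving
blocks stuck under-replicated. A minimal sketch of the counter pattern the
patch adopts; the TransferCounter class and its method names are illustrative
only, not code from the patch:

    import java.util.concurrent.atomic.AtomicInteger;

    // Illustrative only: replace an unsynchronized "int count" with an
    // AtomicInteger so concurrent threads cannot lose increments/decrements.
    class TransferCounter {
      // Before: "int count = 0;" updated with count++ / count--,
      // which is not atomic and drops updates under contention.
      private final AtomicInteger count = new AtomicInteger();

      void started()    { count.getAndIncrement(); }
      void finished()   { count.getAndDecrement(); }
      int  inProgress() { return count.get(); }  // value reported upstream
    }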

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=753346&r1=753345&r2=753346&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Mar 13 19:00:48 2009
@@ -39,6 +39,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -163,7 +164,7 @@
  private final Map<Block, Block> ongoingRecovery = new HashMap<Block, Block>();
   private LinkedList<String> delHints = new LinkedList<String>();
   public final static String EMPTY_DEL_HINT = "";
-  int xmitsInProgress = 0;
+  AtomicInteger xmitsInProgress = new AtomicInteger();
   Daemon dataXceiverServer = null;
   ThreadGroup threadGroup = null;
   long blockReportInterval;
@@ -703,7 +704,7 @@
                                                        data.getCapacity(),
                                                        data.getDfsUsed(),
                                                        data.getRemaining(),
-                                                       xmitsInProgress,
+                                                       xmitsInProgress.get(),
                                                        getXceiverCount());
           myMetrics.heartbeats.inc(now() - startTime);
           //LOG.info("Just sent heartbeat, with name " + localName);
@@ -1110,7 +1111,7 @@
      * Do the deed, write the bytes
      */
     public void run() {
-      xmitsInProgress++;
+      xmitsInProgress.getAndIncrement();
       Socket sock = null;
       DataOutputStream out = null;
       BlockSender blockSender = null;
@@ -1159,10 +1160,10 @@
        LOG.warn(dnRegistration + ":Failed to transfer " + b + " to " + targets[0].getName()
             + " got " + StringUtils.stringifyException(ie));
       } finally {
+        xmitsInProgress.getAndDecrement();
         IOUtils.closeStream(blockSender);
         IOUtils.closeStream(out);
         IOUtils.closeSocket(sock);
-        xmitsInProgress--;
       }
     }
   }
@@ -1291,7 +1292,7 @@
       "data=" + data +
       ", localName='" + dnRegistration.getName() + "'" +
       ", storageID='" + dnRegistration.getStorageID() + "'" +
-      ", xmitsInProgress=" + xmitsInProgress +
+      ", xmitsInProgress=" + xmitsInProgress.get() +
       "}";
   }
   


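As a closing note, the DataTransfer.run() change follows the usual try/finally
shape for an in-progress counter: increment on entry, and decrement as the
first statement of the finally block so the count is released before any
cleanup that could itself fail. A simplified sketch of that shape, with the
surrounding transfer logic elided and only the xmitsInProgress field name
taken from the patch:

    import java.util.concurrent.atomic.AtomicInteger;

    class TransferTask implements Runnable {
      // Mirrors the patched field; everything else here is illustrative.
      static final AtomicInteger xmitsInProgress = new AtomicInteger();

      public void run() {
        xmitsInProgress.getAndIncrement();   // count this transfer
        try {
          // ... open socket, stream the block to the target ...
        } finally {
          xmitsInProgress.getAndDecrement(); // release the count first,
          // ... close streams and socket ...   before cleanup that might fail
        }
      }
    }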