[30/34] git commit: HDFS-7143. Fix findbugs warnings in HDFS-6581 branch. (Contributed by Tsz Wo Nicholas Sze)

2014-10-17 Thread jitendra
HDFS-7143. Fix findbugs warnings in HDFS-6581 branch. (Contributed by Tsz Wo 
Nicholas Sze)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bb27f0f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bb27f0f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bb27f0f

Branch: refs/heads/branch-2
Commit: 6bb27f0ff6a52d01c423d2c9fa9647f188c941b8
Parents: 69828a9
Author: arp a...@apache.org
Authored: Wed Sep 24 20:13:30 2014 -0700
Committer: Jitendra Pandey Jitendra@Jitendra-Pandeys-MacBook-Pro-4.local
Committed: Fri Oct 17 13:42:03 2014 -0700

--
 .../datanode/fsdataset/impl/BlockPoolSlice.java | 50 +++-
 1 file changed, 38 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bb27f0f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index bfa1772..3f58d38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -29,6 +29,8 @@ import java.io.RandomAccessFile;
 import java.util.Scanner;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DU;
 import org.apache.hadoop.fs.FileUtil;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -57,6 +60,8 @@ import org.apache.hadoop.util.Time;
  * This class is synchronized by {@link FsVolumeImpl}.
  */
 class BlockPoolSlice {
+  static final Log LOG = LogFactory.getLog(BlockPoolSlice.class);
+
   private final String bpid;
   private final FsVolumeImpl volume; // volume to which this BlockPool belongs 
to
   private final File currentDir; // StorageDirectory/current/bpid/current
@@ -375,22 +380,36 @@ class BlockPoolSlice {
 File targetDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
 
 if (blockFile.exists()) {
-  File targetBlockFile = new File(targetDir, blockFile.getName());
-  File targetMetaFile = new File(targetDir, metaFile.getName());
 
   if (!targetDir.exists() && !targetDir.mkdirs()) {
-FsDatasetImpl.LOG.warn("Failed to move " + blockFile + " to " + 
targetDir);
+LOG.warn("Failed to mkdirs " + targetDir);
+continue;
+  }
+
+  final File targetMetaFile = new File(targetDir, metaFile.getName());
+  try {
+NativeIO.renameTo(metaFile, targetMetaFile);
+  } catch (IOException e) {
+LOG.warn("Failed to move meta file from "
++ metaFile + " to " + targetMetaFile, e);
 continue;
+
   }
 
-  metaFile.renameTo(targetMetaFile);
-  blockFile.renameTo(targetBlockFile);
+  final File targetBlockFile = new File(targetDir, 
blockFile.getName());
+  try {
+NativeIO.renameTo(blockFile, targetBlockFile);
+  } catch (IOException e) {
+LOG.warn("Failed to move block file from "
++ blockFile + " to " + targetBlockFile, e);
+continue;
+  }
 
   if (targetBlockFile.exists() && targetMetaFile.exists()) {
 ++numRecovered;
   } else {
 // Failure should be rare.
-FsDatasetImpl.LOG.warn("Failed to move " + blockFile + " to " + 
targetDir);
+LOG.warn("Failed to move " + blockFile + " to " + targetDir);
   }
 }
   }
@@ -544,16 +563,23 @@ class BlockPoolSlice {
 
 replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;
 
+if (LOG.isDebugEnabled()) {
+  LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
++ ".  Will try to delete " + replicaToDelete);
+}
+
 // Update volumeMap.
 volumeMap.add(bpid, replicaToKeep);
 
  

[29/34] git commit: HDFS-7143. Fix findbugs warnings in HDFS-6581 branch. (Contributed by Tsz Wo Nicholas Sze)

2014-10-17 Thread jitendra
HDFS-7143. Fix findbugs warnings in HDFS-6581 branch. (Contributed by Tsz Wo 
Nicholas Sze)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05cfd688
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05cfd688
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05cfd688

Branch: refs/heads/branch-2.6
Commit: 05cfd68863737587984f51d36b3d21e4707b9d33
Parents: 9de4d18
Author: arp a...@apache.org
Authored: Wed Sep 24 20:13:30 2014 -0700
Committer: Jitendra Pandey Jitendra@Jitendra-Pandeys-MacBook-Pro-4.local
Committed: Fri Oct 17 16:00:52 2014 -0700

--
 .../datanode/fsdataset/impl/BlockPoolSlice.java | 50 +++-
 1 file changed, 38 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05cfd688/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index bfa1772..3f58d38 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -29,6 +29,8 @@ import java.io.RandomAccessFile;
 import java.util.Scanner;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DU;
 import org.apache.hadoop.fs.FileUtil;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -57,6 +60,8 @@ import org.apache.hadoop.util.Time;
  * This class is synchronized by {@link FsVolumeImpl}.
  */
 class BlockPoolSlice {
+  static final Log LOG = LogFactory.getLog(BlockPoolSlice.class);
+
   private final String bpid;
   private final FsVolumeImpl volume; // volume to which this BlockPool belongs 
to
   private final File currentDir; // StorageDirectory/current/bpid/current
@@ -375,22 +380,36 @@ class BlockPoolSlice {
 File targetDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
 
 if (blockFile.exists()) {
-  File targetBlockFile = new File(targetDir, blockFile.getName());
-  File targetMetaFile = new File(targetDir, metaFile.getName());
 
   if (!targetDir.exists() && !targetDir.mkdirs()) {
-FsDatasetImpl.LOG.warn("Failed to move " + blockFile + " to " + 
targetDir);
+LOG.warn("Failed to mkdirs " + targetDir);
+continue;
+  }
+
+  final File targetMetaFile = new File(targetDir, metaFile.getName());
+  try {
+NativeIO.renameTo(metaFile, targetMetaFile);
+  } catch (IOException e) {
+LOG.warn("Failed to move meta file from "
++ metaFile + " to " + targetMetaFile, e);
 continue;
+
   }
 
-  metaFile.renameTo(targetMetaFile);
-  blockFile.renameTo(targetBlockFile);
+  final File targetBlockFile = new File(targetDir, 
blockFile.getName());
+  try {
+NativeIO.renameTo(blockFile, targetBlockFile);
+  } catch (IOException e) {
+LOG.warn("Failed to move block file from "
++ blockFile + " to " + targetBlockFile, e);
+continue;
+  }
 
   if (targetBlockFile.exists() && targetMetaFile.exists()) {
 ++numRecovered;
   } else {
 // Failure should be rare.
-FsDatasetImpl.LOG.warn("Failed to move " + blockFile + " to " + 
targetDir);
+LOG.warn("Failed to move " + blockFile + " to " + targetDir);
   }
 }
   }
@@ -544,16 +563,23 @@ class BlockPoolSlice {
 
 replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;
 
+if (LOG.isDebugEnabled()) {
+  LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
++ ".  Will try to delete " + replicaToDelete);
+}
+
 // Update volumeMap.
 volumeMap.add(bpid, replicaToKeep);
 

git commit: HDFS-7143. Fix findbugs warnings in HDFS-6581 branch. (Contributed by Tsz Wo Nicholas Sze)

2014-09-24 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-6581 3f9255f21 -> feda4733a (forced update)


HDFS-7143. Fix findbugs warnings in HDFS-6581 branch. (Contributed by Tsz Wo 
Nicholas Sze)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/feda4733
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/feda4733
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/feda4733

Branch: refs/heads/HDFS-6581
Commit: feda4733a8279485fc0ff1271f9c22bc44f333f6
Parents: b1000fb
Author: arp a...@apache.org
Authored: Wed Sep 24 20:13:30 2014 -0700
Committer: arp a...@apache.org
Committed: Wed Sep 24 21:06:56 2014 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-6581.txt   |  2 +
 .../datanode/fsdataset/impl/BlockPoolSlice.java | 50 +++-
 2 files changed, 40 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/feda4733/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
index e046421..6eb8cec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
@@ -71,4 +71,6 @@
 HDFS-6990. Add unit test for evict/delete RAM_DISK block with open
 handle. (Xiaoyu Yao via Arpit Agarwal)
 
+HDFS-7143. Fix findbugs warnings in HDFS-6581 branch. (szetszwo via
+Arpit Agarwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feda4733/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 2ee16f6..3eeb3ef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -29,6 +29,8 @@ import java.io.RandomAccessFile;
 import java.util.Scanner;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DU;
 import org.apache.hadoop.fs.FileUtil;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -57,6 +60,8 @@ import org.apache.hadoop.util.Time;
  * This class is synchronized by {@link FsVolumeImpl}.
  */
 class BlockPoolSlice {
+  static final Log LOG = LogFactory.getLog(BlockPoolSlice.class);
+
   private final String bpid;
   private final FsVolumeImpl volume; // volume to which this BlockPool belongs 
to
   private final File currentDir; // StorageDirectory/current/bpid/current
@@ -369,22 +374,36 @@ class BlockPoolSlice {
 File targetDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
 
 if (blockFile.exists()) {
-  File targetBlockFile = new File(targetDir, blockFile.getName());
-  File targetMetaFile = new File(targetDir, metaFile.getName());
 
   if (!targetDir.exists() && !targetDir.mkdirs()) {
-FsDatasetImpl.LOG.warn("Failed to move " + blockFile + " to " + 
targetDir);
+LOG.warn("Failed to mkdirs " + targetDir);
+continue;
+  }
+
+  final File targetMetaFile = new File(targetDir, metaFile.getName());
+  try {
+NativeIO.renameTo(metaFile, targetMetaFile);
+  } catch (IOException e) {
+LOG.warn("Failed to move meta file from "
++ metaFile + " to " + targetMetaFile, e);
 continue;
+
   }
 
-  metaFile.renameTo(targetMetaFile);
-  blockFile.renameTo(targetBlockFile);
+  final File targetBlockFile = new File(targetDir, 
blockFile.getName());
+  try {
+NativeIO.renameTo(blockFile, targetBlockFile);
+  } catch (IOException e) {
+LOG.warn("Failed to move block file from "
++ blockFile + " to " + targetBlockFile, e);
+