[03/50] [abbrv] hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-21 Thread aengineer
HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/391ce535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/391ce535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/391ce535

Branch: refs/heads/HDFS-7240
Commit: 391ce535a739dc92cb90017d759217265a4fd969
Parents: 30bb197
Author: Vinitha Reddy Gankidi 
Authored: Fri Oct 14 10:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Oct 14 18:13:54 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 75 ++--
 .../blockmanagement/DatanodeDescriptor.java | 48 -
 .../blockmanagement/DatanodeStorageInfo.java| 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  4 +-
 .../blockmanagement/TestBlockManager.java   | 19 +++--
 .../TestNameNodePrunesMissingStorages.java  | 70 +++---
 .../server/datanode/BlockReportTestBase.java| 50 +
 .../TestAddOverReplicatedStripedBlocks.java |  4 ++
 8 files changed, 147 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/391ce535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7949439..7b13add 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1347,6 +1347,8 @@ public class BlockManager implements BlockStatsMXBean {
   }
 }
 checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -2191,7 +2193,7 @@ public class BlockManager implements BlockStatsMXBean {
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
   final BlockListAsLongs newReport,
-  BlockReportContext context, boolean lastStorageInRpc) throws IOException {
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -2245,32 +2247,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-long leaseId = blockReportLeaseManager.removeLease(node);
-BlockManagerFaultInjector.getInstance().
-removeBlockReportLease(node, leaseId);
-List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -2295,36 +2271,25 @@ public class BlockManager implements BlockStatsMXBean {
 return !node.hasStaleStorages();
   }
 
-  private void removeZombieReplicas(BlockReportContext context,
-  DatanodeStorageInfo zombie) {
-LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
-"longer exists on the DataNode.",
-Long.toHexString(context.getReportId()), zombie.getStorageID());
-assert(namesystem.hasWriteLock());
-Iterator<BlockInfo> iter = zombie.getBlockIterator();
-int prevBlocks = zombie.numBlocks();
-while (iter.hasNext()) {
-  BlockInfo block = iter.next();
-  // We assume that a block can be on only one storage in a DataNode.
-  // That's why we pass in the DatanodeDescriptor 

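The NameNodeRpcServer.java hunk is not part of the excerpt, but the diffstat (4 lines changed) matches the call site simply dropping the lastStorageInRpc flag. A minimal sketch of such a caller loop against the new four-argument processReport shown above — everything except processReport itself (reports, nodeReg, blocks, bm, context, noStaleStorages) is an illustrative name, not code copied from the patch:

// Hypothetical caller loop for the simplified processReport signature.
boolean noStaleStorages = true;
for (int r = 0; r < reports.length; r++) {
  BlockListAsLongs blocks = reports[r].getBlocks();
  // The lastStorageInRpc flag is gone: the BlockManager no longer counts
  // RPCs per report to decide when to run a zombie-storage scan.
  noStaleStorages = bm.processReport(nodeReg, reports[r].getStorage(),
      blocks, context);
}
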
[25/50] [abbrv] hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-18 Thread sjlee
HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/391ce535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/391ce535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/391ce535

Branch: refs/heads/HADOOP-13070
Commit: 391ce535a739dc92cb90017d759217265a4fd969
Parents: 30bb197
Author: Vinitha Reddy Gankidi 
Authored: Fri Oct 14 10:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Oct 14 18:13:54 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 75 ++--
 .../blockmanagement/DatanodeDescriptor.java | 48 -
 .../blockmanagement/DatanodeStorageInfo.java| 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  4 +-
 .../blockmanagement/TestBlockManager.java   | 19 +++--
 .../TestNameNodePrunesMissingStorages.java  | 70 +++---
 .../server/datanode/BlockReportTestBase.java| 50 +
 .../TestAddOverReplicatedStripedBlocks.java |  4 ++
 8 files changed, 147 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/391ce535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7949439..7b13add 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1347,6 +1347,8 @@ public class BlockManager implements BlockStatsMXBean {
   }
 }
 checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -2191,7 +2193,7 @@ public class BlockManager implements BlockStatsMXBean {
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
   final BlockListAsLongs newReport,
-  BlockReportContext context, boolean lastStorageInRpc) throws IOException {
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -2245,32 +2247,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-long leaseId = blockReportLeaseManager.removeLease(node);
-BlockManagerFaultInjector.getInstance().
-removeBlockReportLease(node, leaseId);
-List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -2295,36 +2271,25 @@ public class BlockManager implements BlockStatsMXBean {
 return !node.hasStaleStorages();
   }
 
-  private void removeZombieReplicas(BlockReportContext context,
-  DatanodeStorageInfo zombie) {
-LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
-"longer exists on the DataNode.",
-Long.toHexString(context.getReportId()), zombie.getStorageID());
-assert(namesystem.hasWriteLock());
-Iterator<BlockInfo> iter = zombie.getBlockIterator();
-int prevBlocks = zombie.numBlocks();
-while (iter.hasNext()) {
-  BlockInfo block = iter.next();
-  // We assume that a block can be on only one storage in a DataNode.
-  // That's why we pass in the DatanodeDescriptor 

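The block deleted above is the tracking state the commit title refers to: each storage is stamped with the ID of the last report that touched it, and once the final RPC of a report has been seen the node scans for storages carrying a different ID and treats them as zombies. When two full block reports interleave, the stamps flip back and forth and a healthy storage can be flagged. A self-contained sketch of that race, using plain strings and longs in place of the real HDFS types:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/** Toy model of the removed per-storage report-ID tracking (illustrative only). */
public class InterleavedFbrDemo {
  // storage id -> id of the last block report that touched it (the removed state)
  static final Map<String, Long> lastReportId = new LinkedHashMap<>();

  static void processStorageReport(String storageId, long reportId) {
    lastReportId.put(storageId, reportId);
  }

  /** The removed zombie scan: any storage not stamped with this report's id. */
  static List<String> removeZombieStorages(long reportId) {
    List<String> zombies = new ArrayList<>();
    for (Map.Entry<String, Long> e : lastReportId.entrySet()) {
      if (e.getValue() != reportId) {
        zombies.add(e.getKey());
      }
    }
    return zombies;
  }

  public static void main(String[] args) {
    // A DataNode with two healthy storages, s1 and s2.
    // Full block reports A (0xA) and B (0xB) interleave per storage:
    processStorageReport("s1", 0xA);  // report A covers s1
    processStorageReport("s1", 0xB);  // report B covers s1
    processStorageReport("s2", 0xB);  // report B covers s2
    processStorageReport("s2", 0xA);  // report A covers s2 (arrives last)

    // Report B sees its final RPC and runs the zombie scan with its own id:
    System.out.println("falsely flagged: " + removeZombieStorages(0xB));
    // Prints [s2] even though s2 is alive, so its replicas would be removed.
  }
}
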
hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-17 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 8eb0b6f39 -> 1fcaba9b1


HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

(cherry picked from commit 391ce535a739dc92cb90017d759217265a4fd969)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fcaba9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fcaba9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fcaba9b

Branch: refs/heads/branch-2.7
Commit: 1fcaba9b14aa932e91f9cd18d4d98adb744e
Parents: 8eb0b6f
Author: Vinitha Reddy Gankidi 
Authored: Mon Oct 17 18:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Oct 17 18:37:44 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 55 ++---
 .../blockmanagement/DatanodeDescriptor.java | 49 ---
 .../blockmanagement/DatanodeStorageInfo.java| 11 
 .../hdfs/server/namenode/NameNodeRpcServer.java |  2 +-
 .../blockmanagement/TestBlockManager.java   | 10 +--
 .../TestNameNodePrunesMissingStorages.java  | 64 +++-
 .../server/datanode/BlockReportTestBase.java| 50 +++
 7 files changed, 122 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fcaba9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 81a0d22..01fd66c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1107,6 +1107,8 @@ public class BlockManager {
   invalidateBlocks.remove(node, block);
 }
 namesystem.checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -1827,8 +1829,8 @@ public class BlockManager {
*/
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
-  final BlockListAsLongs newReport, BlockReportContext context,
-  boolean lastStorageInRpc) throws IOException {
+  final BlockListAsLongs newReport,
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -1870,29 +1872,6 @@ public class BlockManager {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -1919,32 +1898,6 @@ public class BlockManager {
 return !node.hasStaleStorages();
   }
 
-  private void removeZombieReplicas(BlockReportContext context,
-  DatanodeStorageInfo zombie) {
-LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
- "longer exists on the DataNode.",
-  Long.toHexString(context.getReportId()), zombie.getStorageID());
-assert(namesystem.hasWriteLock());
-Iterator<BlockInfoContiguous> iter = zombie.getBlockIterator();
-int prevBlocks = zombie.numBlocks();
-while (iter.hasNext()) {
-  BlockInfoContiguous block = iter.next();
-  // We assume that a block can be on only one storage in a DataNode.
-  // That's why we pass in the DatanodeDescriptor rather than the
-  // DatanodeStorageInfo.
-  // TODO: remove this assumption 

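The @@ -1107 hunk above lands at the end of the BlockManager method that removes every replica recorded under a storage being dropped (removeBlocksAssociatedTo on this branch); the patch only adds the INFO log there. Pieced together from the context lines, the tail of that method after the patch looks roughly as follows — only invalidateBlocks.remove, namesystem.checkSafeMode() and the new LOG.info are taken from the diff, the rest is a sketch:

// Sketch of the method the @@ -1107,6 hunk lands in; treat everything not
// visible in the diff context as an approximation, not the literal source.
void removeBlocksAssociatedTo(final DatanodeStorageInfo storageInfo) {
  assert namesystem.hasWriteLock();
  final Iterator<BlockInfoContiguous> it = storageInfo.getBlockIterator();
  DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
  while (it.hasNext()) {
    BlockInfoContiguous block = it.next();
    removeStoredBlock(block, node);
    invalidateBlocks.remove(node, block);
  }
  namesystem.checkSafeMode();
  // Added by this patch: make storage removal visible at INFO level.
  LOG.info("Removed blocks associated with storage {} from DataNode {}",
      storageInfo, node);
}
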
[37/50] hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-17 Thread umamahesh
HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/391ce535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/391ce535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/391ce535

Branch: refs/heads/HDFS-10285
Commit: 391ce535a739dc92cb90017d759217265a4fd969
Parents: 30bb197
Author: Vinitha Reddy Gankidi 
Authored: Fri Oct 14 10:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Oct 14 18:13:54 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 75 ++--
 .../blockmanagement/DatanodeDescriptor.java | 48 -
 .../blockmanagement/DatanodeStorageInfo.java| 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  4 +-
 .../blockmanagement/TestBlockManager.java   | 19 +++--
 .../TestNameNodePrunesMissingStorages.java  | 70 +++---
 .../server/datanode/BlockReportTestBase.java| 50 +
 .../TestAddOverReplicatedStripedBlocks.java |  4 ++
 8 files changed, 147 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/391ce535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7949439..7b13add 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1347,6 +1347,8 @@ public class BlockManager implements BlockStatsMXBean {
   }
 }
 checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -2191,7 +2193,7 @@ public class BlockManager implements BlockStatsMXBean {
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
   final BlockListAsLongs newReport,
-  BlockReportContext context, boolean lastStorageInRpc) throws IOException {
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -2245,32 +2247,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-long leaseId = blockReportLeaseManager.removeLease(node);
-BlockManagerFaultInjector.getInstance().
-removeBlockReportLease(node, leaseId);
-List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -2295,36 +2271,25 @@ public class BlockManager implements BlockStatsMXBean {
 return !node.hasStaleStorages();
   }
 
-  private void removeZombieReplicas(BlockReportContext context,
-  DatanodeStorageInfo zombie) {
-LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
-"longer exists on the DataNode.",
-Long.toHexString(context.getReportId()), zombie.getStorageID());
-assert(namesystem.hasWriteLock());
-Iterator<BlockInfo> iter = zombie.getBlockIterator();
-int prevBlocks = zombie.numBlocks();
-while (iter.hasNext()) {
-  BlockInfo block = iter.next();
-  // We assume that a block can be on only one storage in a DataNode.
-  // That's why we pass in the DatanodeDescriptor 

[09/50] [abbrv] hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-17 Thread wangda
HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/391ce535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/391ce535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/391ce535

Branch: refs/heads/YARN-3368
Commit: 391ce535a739dc92cb90017d759217265a4fd969
Parents: 30bb197
Author: Vinitha Reddy Gankidi 
Authored: Fri Oct 14 10:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Oct 14 18:13:54 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 75 ++--
 .../blockmanagement/DatanodeDescriptor.java | 48 -
 .../blockmanagement/DatanodeStorageInfo.java| 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  4 +-
 .../blockmanagement/TestBlockManager.java   | 19 +++--
 .../TestNameNodePrunesMissingStorages.java  | 70 +++---
 .../server/datanode/BlockReportTestBase.java| 50 +
 .../TestAddOverReplicatedStripedBlocks.java |  4 ++
 8 files changed, 147 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/391ce535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7949439..7b13add 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1347,6 +1347,8 @@ public class BlockManager implements BlockStatsMXBean {
   }
 }
 checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -2191,7 +2193,7 @@ public class BlockManager implements BlockStatsMXBean {
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
   final BlockListAsLongs newReport,
-  BlockReportContext context, boolean lastStorageInRpc) throws IOException {
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -2245,32 +2247,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-long leaseId = blockReportLeaseManager.removeLease(node);
-BlockManagerFaultInjector.getInstance().
-removeBlockReportLease(node, leaseId);
-List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -2295,36 +2271,25 @@ public class BlockManager implements BlockStatsMXBean {
 return !node.hasStaleStorages();
   }
 
-  private void removeZombieReplicas(BlockReportContext context,
-  DatanodeStorageInfo zombie) {
-LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
-"longer exists on the DataNode.",
-Long.toHexString(context.getReportId()), zombie.getStorageID());
-assert(namesystem.hasWriteLock());
-Iterator<BlockInfo> iter = zombie.getBlockIterator();
-int prevBlocks = zombie.numBlocks();
-while (iter.hasNext()) {
-  BlockInfo block = iter.next();
-  // We assume that a block can be on only one storage in a DataNode.
-  // That's why we pass in the DatanodeDescriptor 

hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-14 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 b9fdbd710 -> 2304501bc


HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

(cherry picked from commit 391ce535a739dc92cb90017d759217265a4fd969)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2304501b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2304501b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2304501b

Branch: refs/heads/branch-2.8
Commit: 2304501bcfcd3747a816e9408854424af6ee46ff
Parents: b9fdbd7
Author: Vinitha Reddy Gankidi 
Authored: Fri Oct 14 10:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Oct 14 18:52:00 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 76 ++--
 .../blockmanagement/DatanodeDescriptor.java | 50 -
 .../blockmanagement/DatanodeStorageInfo.java| 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  4 +-
 .../blockmanagement/TestBlockManager.java   | 12 ++--
 .../TestNameNodePrunesMissingStorages.java  | 70 +++---
 .../server/datanode/BlockReportTestBase.java| 50 +
 7 files changed, 145 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2304501b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 39ac94d..8649e48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -61,7 +61,6 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
@@ -1233,6 +1232,8 @@ public class BlockManager implements BlockStatsMXBean {
   invalidateBlocks.remove(node, block);
 }
 namesystem.checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -1942,8 +1943,8 @@ public class BlockManager implements BlockStatsMXBean {
*/
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
-  final BlockListAsLongs newReport, BlockReportContext context,
-  boolean lastStorageInRpc) throws IOException {
+  final BlockListAsLongs newReport,
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -1997,32 +1998,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-long leaseId = blockReportLeaseManager.removeLease(node);
-BlockManagerFaultInjector.getInstance().
-removeBlockReportLease(node, leaseId);
-List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -2047,30 +2022,25 @@ public class BlockManager implements BlockStatsMXBean {
   

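None of the excerpts reach the DatanodeDescriptor.java side of the change (roughly fifty lines removed per the diffstat), but the calls deleted from processReport — updateBlockReportContext, removeZombieStorages, clearBlockReportContext — imply per-DataNode state of roughly the following shape. A hedged reconstruction inferred from those call sites, not the literal deleted code:

// Inferred shape of the FBR tracking state deleted from DatanodeDescriptor.
// Field names and details are guesses based on the call sites in the diff.
private long curBlockReportId = 0;
private java.util.BitSet curBlockReportRpcsSeen = null;

/** Record one RPC of the current report; return how many have been seen. */
int updateBlockReportContext(BlockReportContext context) {
  if (curBlockReportId != context.getReportId()) {
    // An interleaved report with a different ID resets the bookkeeping
    // mid-flight, which is what made the zombie scan unreliable.
    curBlockReportId = context.getReportId();
    curBlockReportRpcsSeen = new java.util.BitSet(context.getTotalRpcs());
  }
  curBlockReportRpcsSeen.set(context.getCurRpc());
  return curBlockReportRpcsSeen.cardinality();
}

void clearBlockReportContext() {
  curBlockReportId = 0;
  curBlockReportRpcsSeen = null;
}
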
hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-14 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 78cb79fa9 -> 863e1020c


HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

(cherry picked from commit 391ce535a739dc92cb90017d759217265a4fd969)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/863e1020
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/863e1020
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/863e1020

Branch: refs/heads/branch-2
Commit: 863e1020c072d2f38249f1bcb22cb3517997575a
Parents: 78cb79f
Author: Vinitha Reddy Gankidi 
Authored: Fri Oct 14 10:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Oct 14 18:25:52 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 76 ++--
 .../blockmanagement/DatanodeDescriptor.java | 50 -
 .../blockmanagement/DatanodeStorageInfo.java| 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  4 +-
 .../blockmanagement/TestBlockManager.java   | 12 ++--
 .../TestNameNodePrunesMissingStorages.java  | 70 +++---
 .../server/datanode/BlockReportTestBase.java| 50 +
 7 files changed, 145 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/863e1020/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 96dfa59..c282505 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -61,7 +61,6 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
@@ -1218,6 +1217,8 @@ public class BlockManager implements BlockStatsMXBean {
   invalidateBlocks.remove(node, block);
 }
 checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -1983,8 +1984,8 @@ public class BlockManager implements BlockStatsMXBean {
*/
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
-  final BlockListAsLongs newReport, BlockReportContext context,
-  boolean lastStorageInRpc) throws IOException {
+  final BlockListAsLongs newReport,
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -2038,32 +2039,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-long leaseId = blockReportLeaseManager.removeLease(node);
-BlockManagerFaultInjector.getInstance().
-removeBlockReportLease(node, leaseId);
-List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -2088,30 +2063,25 @@ public class BlockManager implements BlockStatsMXBean {
 return 

hadoop git commit: HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for interleaving block reports. Contributed by Vinitha Gankidi.

2016-10-14 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 30bb1970c -> 391ce535a


HDFS-10301. Remove FBR tracking state to fix false zombie storage detection for 
interleaving block reports. Contributed by Vinitha Gankidi.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/391ce535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/391ce535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/391ce535

Branch: refs/heads/trunk
Commit: 391ce535a739dc92cb90017d759217265a4fd969
Parents: 30bb197
Author: Vinitha Reddy Gankidi 
Authored: Fri Oct 14 10:37:44 2016 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Oct 14 18:13:54 2016 -0700

--
 .../server/blockmanagement/BlockManager.java| 75 ++--
 .../blockmanagement/DatanodeDescriptor.java | 48 -
 .../blockmanagement/DatanodeStorageInfo.java| 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  4 +-
 .../blockmanagement/TestBlockManager.java   | 19 +++--
 .../TestNameNodePrunesMissingStorages.java  | 70 +++---
 .../server/datanode/BlockReportTestBase.java| 50 +
 .../TestAddOverReplicatedStripedBlocks.java |  4 ++
 8 files changed, 147 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/391ce535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7949439..7b13add 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1347,6 +1347,8 @@ public class BlockManager implements BlockStatsMXBean {
   }
 }
 checkSafeMode();
+LOG.info("Removed blocks associated with storage {} from DataNode {}",
+storageInfo, node);
   }
 
   /**
@@ -2191,7 +2193,7 @@ public class BlockManager implements BlockStatsMXBean {
   public boolean processReport(final DatanodeID nodeID,
   final DatanodeStorage storage,
   final BlockListAsLongs newReport,
-  BlockReportContext context, boolean lastStorageInRpc) throws IOException {
+  BlockReportContext context) throws IOException {
 namesystem.writeLock();
 final long startTime = Time.monotonicNow(); //after acquiring write lock
 final long endTime;
@@ -2245,32 +2247,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
   
   storageInfo.receivedBlockReport();
-  if (context != null) {
-storageInfo.setLastBlockReportId(context.getReportId());
-if (lastStorageInRpc) {
-  int rpcsSeen = node.updateBlockReportContext(context);
-  if (rpcsSeen >= context.getTotalRpcs()) {
-long leaseId = blockReportLeaseManager.removeLease(node);
-BlockManagerFaultInjector.getInstance().
-removeBlockReportLease(node, leaseId);
-List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
-if (zombies.isEmpty()) {
-  LOG.debug("processReport 0x{}: no zombie storages found.",
-  Long.toHexString(context.getReportId()));
-} else {
-  for (DatanodeStorageInfo zombie : zombies) {
-removeZombieReplicas(context, zombie);
-  }
-}
-node.clearBlockReportContext();
-  } else {
-LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
-"report.", Long.toHexString(context.getReportId()),
-(context.getTotalRpcs() - rpcsSeen)
-);
-  }
-}
-  }
 } finally {
   endTime = Time.monotonicNow();
   namesystem.writeUnlock();
@@ -2295,36 +2271,25 @@ public class BlockManager implements BlockStatsMXBean {
 return !node.hasStaleStorages();
   }
 
-  private void removeZombieReplicas(BlockReportContext context,
-  DatanodeStorageInfo zombie) {
-LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
-"longer exists on the DataNode.",
-Long.toHexString(context.getReportId()), zombie.getStorageID());
-assert(namesystem.hasWriteLock());
-Iterator<BlockInfo> iter = zombie.getBlockIterator();
-int prevBlocks = zombie.numBlocks();
-while (iter.hasNext()) {
-  BlockInfo block = iter.next();
-  // We assume that a block can be on only one