[ https://issues.apache.org/jira/browse/HDFS-15877?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Haiyang Hu updated HDFS-15877:
------------------------------
    Description: 
The related code is in BlockManager#validateReconstructionWork. The first two early-return paths call rw.resetTargets() before returning false, but the placement-policy recheck path returns false without it:
{code:java}
private boolean validateReconstructionWork(BlockReconstructionWork rw) {
  BlockInfo block = rw.getBlock();
  int priority = rw.getPriority();
  // Recheck since global lock was released
  // skip abandoned block or block reopened for append
  if (block.isDeleted() || !block.isCompleteOrCommitted()) {
    neededReconstruction.remove(block, priority);
    rw.resetTargets();
    return false;
  }

  // do not schedule more if enough replicas is already pending
  NumberReplicas numReplicas = countNodes(block);
  final short requiredRedundancy =
      getExpectedLiveRedundancyNum(block, numReplicas);
  final int pendingNum = pendingReconstruction.getNumReplicas(block);
  if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum)) {
    neededReconstruction.remove(block, priority);
    rw.resetTargets();
    blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
        " it has enough replicas", block);
    return false;
  }

  DatanodeStorageInfo[] targets = rw.getTargets();
  BlockPlacementStatus placementStatus = getBlockPlacementStatus(block);
  if ((numReplicas.liveReplicas() >= requiredRedundancy) &&
      (!placementStatus.isPlacementPolicySatisfied())) {
    BlockPlacementStatus newPlacementStatus =
        getBlockPlacementStatus(block, targets);
    if (!newPlacementStatus.isPlacementPolicySatisfied() &&
        (newPlacementStatus.getAdditionalReplicasRequired() >=
            placementStatus.getAdditionalReplicasRequired())) {
      // If the new targets do not meet the placement policy, or at least
      // reduce the number of replicas needed, then no use continuing.
      return false; // Here to add rw.resetTargets()
    }
    // mark that the reconstruction work is to replicate internal block to a
    // new rack.
    rw.setNotEnoughRack();
  }
{code}
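
A minimal sketch of the proposed change, mirroring the cleanup already done on the other two return-false paths above (a sketch only, assuming no further bookkeeping is needed on this path):
{code:java}
    if (!newPlacementStatus.isPlacementPolicySatisfied() &&
        (newPlacementStatus.getAdditionalReplicasRequired() >=
            placementStatus.getAdditionalReplicasRequired())) {
      // If the new targets do not meet the placement policy, or at least
      // reduce the number of replicas needed, then no use continuing.
      // Release the chosen targets before bailing out, as the earlier
      // early-return paths in this method already do.
      rw.resetTargets();
      return false;
    }
{code}
With this change, a BlockReconstructionWork that fails the placement-policy recheck no longer holds on to its chosen targets after validateReconstructionWork returns false.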



> BlockReconstructionWork should resetTargets() before 
> BlockManager#validateReconstructionWork return false
> ---------------------------------------------------------------------------------------------------------
>
>                 Key: HDFS-15877
>                 URL: https://issues.apache.org/jira/browse/HDFS-15877
>             Project: Hadoop HDFS
>          Issue Type: Improvement
>            Reporter: Haiyang Hu
>            Priority: Minor
>



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
