goiri commented on a change in pull request #2055:
URL: https://github.com/apache/hadoop/pull/2055#discussion_r437711091
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
##########
@@ -125,70 +128,54 @@ boolean decrement(BlockInfo block, DatanodeStorageInfo
dn) {
* removed
*/
PendingBlockInfo remove(BlockInfo block) {
- synchronized (pendingReconstructions) {
- return pendingReconstructions.remove(block);
- }
+ return pendingReconstructions.remove(block);
}
public void clear() {
- synchronized (pendingReconstructions) {
pendingReconstructions.clear();
- synchronized (timedOutItems) {
- timedOutItems.clear();
- }
+ timedOutItems.clear();
timedOutCount = 0L;
- }
}
/**
* The total number of blocks that are undergoing reconstruction.
*/
int size() {
- synchronized (pendingReconstructions) {
- return pendingReconstructions.size();
- }
+ return pendingReconstructions.size();
}
/**
* How many copies of this block is pending reconstruction?.
*/
int getNumReplicas(BlockInfo block) {
- synchronized (pendingReconstructions) {
- PendingBlockInfo found = pendingReconstructions.get(block);
- if (found != null) {
- return found.getNumReplicas();
- }
- }
- return 0;
+ PendingBlockInfo found = pendingReconstructions.get(block);
+ return (found == null) ? 0 : found.getNumReplicas();
}
/**
* Used for metrics.
* @return The number of timeouts
*/
long getNumTimedOuts() {
- synchronized (timedOutItems) {
- return timedOutCount + timedOutItems.size();
- }
+ return timedOutCount + timedOutItems.size();
Review comment:
Unlikely, but this can still technically be inconsistent, right?
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
##########
@@ -296,46 +275,42 @@ void pendingReconstructionCheck() {
public Daemon getTimerThread() {
return timerThread;
}
- /*
- * Shuts down the pending reconstruction monitor thread.
- * Waits for the thread to exit.
+
+ /**
+ * Shuts down the pending reconstruction monitor thread. Waits for the thread
+ * to exit.
*/
void stop() {
- fsRunning = false;
- if(timerThread == null) return;
+ if (timerThread == null)
+ return;
Review comment:
Add braces?
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
##########
@@ -296,46 +275,42 @@ void pendingReconstructionCheck() {
public Daemon getTimerThread() {
return timerThread;
}
- /*
- * Shuts down the pending reconstruction monitor thread.
- * Waits for the thread to exit.
+
+ /**
+ * Shuts down the pending reconstruction monitor thread. Waits for the thread
+ * to exit.
*/
void stop() {
- fsRunning = false;
Review comment:
I kind of like this. You think catching the interrupt is enough?
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
##########
@@ -296,46 +275,42 @@ void pendingReconstructionCheck() {
public Daemon getTimerThread() {
return timerThread;
}
- /*
- * Shuts down the pending reconstruction monitor thread.
- * Waits for the thread to exit.
+
+ /**
+ * Shuts down the pending reconstruction monitor thread. Waits for the thread
+ * to exit.
*/
void stop() {
- fsRunning = false;
- if(timerThread == null) return;
+ if (timerThread == null)
+ return;
timerThread.interrupt();
try {
timerThread.join(3000);
} catch (InterruptedException ie) {
+ LOG.debug("PendingReconstructionMonitor stop is interrupted", ie);
}
}
/**
* Iterate through all items and print them.
*/
void metaSave(PrintWriter out) {
- synchronized (pendingReconstructions) {
Review comment:
Why is there no need anymore?
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
##########
@@ -217,20 +203,16 @@ void setTimeStamp() {
}
void incrementReplicas(DatanodeStorageInfo... newTargets) {
- if (newTargets != null) {
- for (DatanodeStorageInfo newTarget : newTargets) {
- if (!targets.contains(newTarget)) {
- targets.add(newTarget);
- }
- }
+ for (DatanodeStorageInfo newTarget : newTargets) {
+ targets.add(newTarget);
}
}
void decrementReplicas(DatanodeStorageInfo dn) {
Iterator<DatanodeStorageInfo> iterator = targets.iterator();
while (iterator.hasNext()) {
DatanodeStorageInfo next = iterator.next();
- if (next.getDatanodeDescriptor() == dn.getDatanodeDescriptor()) {
+ if (next.getDatanodeDescriptor().equals(dn.getDatanodeDescriptor())) {
Review comment:
This looks pretty bad... could this have had a negative impact in the
past?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]