Author: dhruba
Date: Thu Feb 14 11:56:03 2008
New Revision: 627860
URL: http://svn.apache.org/viewvc?rev=627860&view=rev
Log:
HADOOP-2832. Remove tabs from code of DFSClient for better
indentation. (dhruba)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=627860&r1=627859&r2=627860&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Thu Feb 14 11:56:03 2008
@@ -41,6 +41,9 @@
HADOOP-2191. du and dus command on non-existent directory gives
appropriate error message. (Mahadev Konar via dhruba)
+ HADOOP-2832. Remove tabs from code of DFSClient for better
+ indentation. (dhruba)
+
Release 0.16.1 - Unreleased
BUG FIXES
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java?rev=627860&r1=627859&r2=627860&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java Thu Feb 14 11:56:03 2008
@@ -1632,9 +1632,9 @@
" lastPacketInBlock:" + one.lastPacketInBlock);
} catch (IOException e) {
LOG.warn("DataStreamer Exception: " + e);
- hasError = true;
- }
- }
+ hasError = true;
+ }
+ }
if (closed || hasError || !clientRunning) {
continue;
@@ -1684,8 +1684,8 @@
Thread.sleep(artificialSlowdown);
} catch (InterruptedException e) {}
}
- }
- }
+ }
+ }
// shutdown thread
void close() {
@@ -1698,12 +1698,12 @@
}
this.interrupt();
}
- }
-
+ }
+
+ //
+ // Processes reponses from the datanodes. A packet is removed
+ // from the ackQueue when its response arrives.
//
- // Processes reponses from the datanodes. A packet is removed
- // from the ackQueue when its response arrives.
- //
private class ResponseProcessor extends Thread {
private volatile boolean closed = false;
@@ -1714,31 +1714,31 @@
this.targets = targets;
}
- public void run() {
+ public void run() {
this.setName("ResponseProcessor for block " + block);
while (!closed && clientRunning && !lastPacketInBlock) {
- // process responses from datanodes.
- try {
- // verify seqno from datanode
+ // process responses from datanodes.
+ try {
+ // verify seqno from datanode
int numTargets = -1;
- long seqno = blockReplyStream.readLong();
+ long seqno = blockReplyStream.readLong();
LOG.debug("DFSClient received ack for seqno " + seqno);
if (seqno == -1) {
continue;
} else if (seqno == -2) {
// no nothing
} else {
- Packet one = null;
- synchronized (ackQueue) {
- one = ackQueue.getFirst();
- }
- if (one.seqno != seqno) {
- throw new IOException("Responseprocessor: Expecting seqno " +
+ Packet one = null;
+ synchronized (ackQueue) {
+ one = ackQueue.getFirst();
+ }
+ if (one.seqno != seqno) {
+ throw new IOException("Responseprocessor: Expecting seqno " +
" for block " + block +
- one.seqno + " but received " + seqno);
- }
+ one.seqno + " but received " + seqno);
+ }
lastPacketInBlock = one.lastPacketInBlock;
}
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?rev=627860&r1=627859&r2=627860&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Thu Feb 14 11:56:03 2008
@@ -171,7 +171,7 @@
* This method starts the data node with the specified conf.
*
* @param conf - the configuration
- * if conf's CONFIG_PROPERTY_SIMULATED property is set
+ * if conf's CONFIG_PROPERTY_SIMULATED property is set
* then a simulated storage based data node is created.
*
* @param dataDirs - only for a non-simulated storage data node
@@ -262,7 +262,7 @@
if (this.initialBlockReportDelay >= blockReportIntervalBasis) {
this.initialBlockReportDelay = 0;
LOG.info("dfs.blockreport.initialDelay is greater than " +
- "dfs.blockreport.intervalMsec." + " Setting initial delay to 0 msec:");
+ "dfs.blockreport.intervalMsec." + " Setting initial delay to 0 msec:");
}
this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval",
HEARTBEAT_INTERVAL) * 1000L;
DataNode.nameNodeAddr = nameNodeAddr;
@@ -1981,7 +1981,7 @@
// Open local disk out
//
streams = data.writeToBlock(block, isRecovery);
- this.finalized = data.isValidBlock(block);
+ this.finalized = data.isValidBlock(block);
if (streams != null) {
this.bufStream = new DFSBufferedOutputStream(
streams.dataOut, BUFFER_SIZE);
@@ -2644,7 +2644,7 @@
public void scheduleBlockReport(long delay) {
if (delay > 0) { // send BR after random delay
lastBlockReport = System.currentTimeMillis()
- - ( blockReportInterval
- new Random().nextInt((int)(delay)));
+ - ( blockReportInterval - new Random().nextInt((int)(delay)));
} else { // send at next heartbeat
lastBlockReport = lastHeartbeat - blockReportInterval;
}