Author: rangadi
Date: Wed Mar 4 03:13:06 2009
New Revision: 749888
URL: http://svn.apache.org/viewvc?rev=749888&view=rev
Log:
HADOOP-4103. NameNode keeps a count of missing blocks. It warns on the
WebUI if any such blocks exist. '-report' and '-metaSave' include extra
info to track such blocks. (Raghu Angadi)
Added:
hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
- copied unchanged from r749863, hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
Modified:
hadoop/core/branches/branch-0.20/ (props changed)
hadoop/core/branches/branch-0.20/CHANGES.txt (contents, props changed)
hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java
hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java
hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java
hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java
hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
hadoop/core/branches/branch-0.20/src/webapps/hdfs/dfshealth.jsp
hadoop/core/branches/branch-0.20/src/webapps/static/hadoop.css
Propchange: hadoop/core/branches/branch-0.20/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Mar 4 03:13:06 2009
@@ -1,2 +1,2 @@
/hadoop/core/branches/branch-0.19:713112
-/hadoop/core/trunk:727001,727117,727191,727212,727217,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,736426,738328,738697,740077,740157,741703,741762,743745,743816,743892,744894,745180,746010,746206,746227,746233,746274,746902-746903,746944,746968,746970,747279,747802,748084,748090,748783,749262,749318
+/hadoop/core/trunk:727001,727117,727191,727212,727217,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,736426,738328,738697,740077,740157,741703,741762,743745,743816,743892,744894,745180,746010,746206,746227,746233,746274,746902-746903,746944,746968,746970,747279,747802,748084,748090,748783,749262,749318,749863
Modified: hadoop/core/branches/branch-0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/CHANGES.txt?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.20/CHANGES.txt Wed Mar 4 03:13:06 2009
@@ -67,6 +67,10 @@
HADOOP-4970. The full path name of a file is preserved inside Trash.
(Prasad Chakka via dhruba)
+ HADOOP-4103. NameNode keeps a count of missing blocks. It warns on the
+ WebUI if any such blocks exist. '-report' and '-metaSave' include extra
+ info to track such blocks. (Raghu Angadi)
+
NEW FEATURES
HADOOP-4575. Add a proxy service for relaying HsftpFileSystem requests.
Propchange: hadoop/core/branches/branch-0.20/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Mar 4 03:13:06 2009
@@ -1,3 +1,3 @@
/hadoop/core/branches/branch-0.18/CHANGES.txt:727226
/hadoop/core/branches/branch-0.19/CHANGES.txt:713112
-/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703,741762,743296,743745,743816,743892,744894,745180,745268,746010,746193,746206,746227,746233,746274,746902-746903,746944,746968,746970,747279,747802,748084,748090,748783,749262,749318
+/hadoop/core/trunk/CHANGES.txt:727001,727117,727191,727212,727228,727255,727869,728187,729052,729987,732385,732572,732613,732777,732838,732869,733887,734870,734916,735082,736426,738602,738697,739416,740077,740157,741703,741762,743296,743745,743816,743892,744894,745180,745268,746010,746193,746206,746227,746233,746274,746902-746903,746944,746968,746970,747279,747802,748084,748090,748783,749262,749318,749863
Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Wed Mar 4 03:13:06 2009
@@ -757,6 +757,31 @@
return rawNums[1];
}
+ /**
+ * Returns count of blocks with no good replicas left. Normally should be
+ * zero.
+ * @throws IOException
+ */
+ public long getMissingBlocksCount() throws IOException {
+ return namenode.getStats()[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
+ }
+
+ /**
+ * Returns count of blocks with one or more replicas missing.
+ * @throws IOException
+ */
+ public long getUnderReplicatedBlocksCount() throws IOException {
+ return namenode.getStats()[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX];
+ }
+
+ /**
+ * Returns count of blocks with at least one replica marked corrupt.
+ * @throws IOException
+ */
+ public long getCorruptBlocksCount() throws IOException {
+ return namenode.getStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX];
+ }
+
public DatanodeInfo[] datanodeReport(DatanodeReportType type)
throws IOException {
return namenode.getDatanodeReport(type);
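
Each of the new getters is a one-line index into the array returned by
namenode.getStats(). A minimal usage sketch, assuming 'conf' is an
org.apache.hadoop.conf.Configuration pointing at the target cluster:

    // Sketch only; DFSClient(Configuration) and close() are existing APIs,
    // but the surrounding setup is assumed.
    DFSClient client = new DFSClient(conf);
    try {
      long missing = client.getMissingBlocksCount();
      if (missing > 0) {
        System.err.println(missing + " block(s) have no good replicas left");
      }
    } finally {
      client.close();
    }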
Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java Wed Mar 4 03:13:06 2009
@@ -24,6 +24,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -318,6 +319,34 @@
public long getRawUsed() throws IOException{
return dfs.totalRawUsed();
}
+
+ /**
+ * Returns count of blocks with no good replicas left. Normally should be
+ * zero.
+ *
+ * @throws IOException
+ */
+ public long getMissingBlocksCount() throws IOException {
+ return dfs.getMissingBlocksCount();
+ }
+
+ /**
+ * Returns count of blocks with one or more replicas missing.
+ *
+ * @throws IOException
+ */
+ public long getUnderReplicatedBlocksCount() throws IOException {
+ return dfs.getUnderReplicatedBlocksCount();
+ }
+
+ /**
+ * Returns count of blocks with at least one replica marked corrupt.
+ *
+ * @throws IOException
+ */
+ public long getCorruptBlocksCount() throws IOException {
+ return dfs.getCorruptBlocksCount();
+ }
/** Return statistics for each datanode. */
public DatanodeInfo[] getDataNodeStats() throws IOException {
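
These mirror the DFSClient getters on the public FileSystem object; callers
have to down-cast, since the generic FileSystem API does not expose the
counters. A hedged sketch, assuming the default FileSystem for the
configuration is HDFS:

    // Sketch: assumes fs.default.name points at an HDFS cluster.
    FileSystem fs = FileSystem.get(new Configuration());
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      System.out.println("Under replicated: " + dfs.getUnderReplicatedBlocksCount());
      System.out.println("Corrupt replicas: " + dfs.getCorruptBlocksCount());
      System.out.println("Missing blocks:   " + dfs.getMissingBlocksCount());
    }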
Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Wed Mar 4 03:13:06 2009
@@ -278,6 +278,13 @@
*/
public void renewLease(String clientName) throws IOException;
+ public int GET_STATS_CAPACITY_IDX = 0;
+ public int GET_STATS_USED_IDX = 1;
+ public int GET_STATS_REMAINING_IDX = 2;
+ public int GET_STATS_UNDER_REPLICATED_IDX = 3;
+ public int GET_STATS_CORRUPT_BLOCKS_IDX = 4;
+ public int GET_STATS_MISSING_BLOCKS_IDX = 5;
+
/**
* Get a set of statistics about the filesystem.
* Right now, only three values are returned.
@@ -285,7 +292,12 @@
* <li> [0] contains the total storage capacity of the system, in bytes.</li>
* <li> [1] contains the total used space of the system, in bytes.</li>
* <li> [2] contains the available storage of the system, in bytes.</li>
+ * <li> [3] contains number of under replicated blocks in the system.</li>
+ * <li> [4] contains number of blocks with a corrupt replica. </li>
+ * <li> [5] contains number of blocks without any good replicas left. </li>
* </ul>
+ * Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
+ * actual numbers to index into the array.
*/
public long[] getStats() throws IOException;
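
Callers that use ClientProtocol directly should index the stats array with
these constants rather than bare literals, since the layout has now grown
past the original three entries. A minimal sketch, where 'namenode' is
assumed to be a ClientProtocol RPC proxy:

    // Sketch: 'namenode' is an assumed ClientProtocol proxy.
    long[] stats = namenode.getStats();
    long capacity = stats[ClientProtocol.GET_STATS_CAPACITY_IDX];
    long missing = stats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];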
Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java Wed Mar 4 03:13:06 2009
@@ -130,4 +130,8 @@
Collection<DatanodeDescriptor> nodes = getNodes(blk);
return (nodes == null) ? 0 : nodes.size();
}
+
+ public int size() {
+ return corruptReplicasMap.size();
+ }
}
Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Mar 4 03:13:06 2009
@@ -124,7 +124,7 @@
private FSNamesystemMetrics myFSMetrics;
private long capacityTotal = 0L, capacityUsed = 0L, capacityRemaining = 0L;
private int totalLoad = 0;
- private long pendingReplicationBlocksCount = 0L,
+ private long pendingReplicationBlocksCount = 0L, corruptReplicaBlocksCount,
underReplicatedBlocksCount = 0L, scheduledReplicationBlocksCount = 0L;
//
@@ -242,6 +242,8 @@
* Last block index used for replication work.
*/
private int replIndex = 0;
+ private long missingBlocksInCurIter = 0;
+ private long missingBlocksInPrevIter = 0;
public static FSNamesystem fsNamesystemObject;
/** NameNode RPC address */
@@ -487,7 +489,7 @@
/**
* Dump all metadata into specified file
*/
- void metaSave(String filename) throws IOException {
+ synchronized void metaSave(String filename) throws IOException {
checkSuperuserPrivilege();
File file = new File(System.getProperty("hadoop.log.dir"),
filename);
@@ -502,7 +504,21 @@
out.println("Metasave: Blocks waiting for replication: " +
neededReplications.size());
for (Block block : neededReplications) {
- out.print(block);
+ List<DatanodeDescriptor> containingNodes =
+ new ArrayList<DatanodeDescriptor>();
+ NumberReplicas numReplicas = new NumberReplicas();
+ // source node returned is not used
+ chooseSourceDatanode(block, containingNodes, numReplicas);
+ int usableReplicas = numReplicas.liveReplicas() +
+ numReplicas.decommissionedReplicas();
+ // l: live, d: decommissioned, c: corrupt, e: excess
+ out.print(block + " (replicas:" +
+ " l: " + numReplicas.liveReplicas() +
+ " d: " + numReplicas.decommissionedReplicas() +
+ " c: " + numReplicas.corruptReplicas() +
+ " e: " + numReplicas.excessReplicas() +
+ ((usableReplicas > 0)? "" : " MISSING") + ")");
+
for (Iterator<DatanodeDescriptor> jt = blocksMap.nodeIterator(block);
jt.hasNext();) {
DatanodeDescriptor node = jt.next();
@@ -2313,9 +2329,12 @@
workFound = computeReplicationWork(blocksToProcess);
// Update FSNamesystemMetrics counters
- pendingReplicationBlocksCount = pendingReplications.size();
- underReplicatedBlocksCount = neededReplications.size();
- scheduledReplicationBlocksCount = workFound;
+ synchronized (this) {
+ pendingReplicationBlocksCount = pendingReplications.size();
+ underReplicatedBlocksCount = neededReplications.size();
+ scheduledReplicationBlocksCount = workFound;
+ corruptReplicaBlocksCount = corruptReplicas.size();
+ }
if(workFound == 0)
workFound = computeInvalidateWork(nodesToProcess);
@@ -2347,6 +2366,10 @@
int scheduledReplicationCount = 0;
synchronized(neededReplications) {
+ if (neededReplications.size() == 0) {
+ missingBlocksInCurIter = 0;
+ missingBlocksInPrevIter = 0;
+ }
// # of blocks to process equals either twice the number of live
// data-nodes or the number of under-replicated blocks whichever is less
blocksToProcess = Math.min(blocksToProcess, neededReplications.size());
@@ -2365,6 +2388,8 @@
if( ! neededReplicationsIterator.hasNext()) {
// start from the beginning
replIndex = 0;
+ missingBlocksInPrevIter = missingBlocksInCurIter;
+ missingBlocksInCurIter = 0;
blocksToProcess = Math.min(blocksToProcess,
neededReplications.size());
if(blkCnt >= blocksToProcess)
break;
@@ -2391,6 +2416,11 @@
NumberReplicas numReplicas = new NumberReplicas();
DatanodeDescriptor srcNode =
chooseSourceDatanode(block, containingNodes, numReplicas);
+
+ if ((numReplicas.liveReplicas() + numReplicas.decommissionedReplicas())
+ <= 0) {
+ missingBlocksInCurIter++;
+ }
if(srcNode == null) // block can not be replicated from any node
continue;
@@ -3258,11 +3288,19 @@
addStoredBlock(block, node, delHintNode );
}
+ public long getMissingBlocksCount() {
+ // not locking
+ return Math.max(missingBlocksInPrevIter, missingBlocksInCurIter);
+ }
+
long[] getStats() throws IOException {
checkSuperuserPrivilege();
synchronized(heartbeats) {
- return new long[]
- {getCapacityTotal(), getCapacityUsed(), getCapacityRemaining()};
+ return new long[] {this.capacityTotal, this.capacityUsed,
+ this.capacityRemaining,
+ this.underReplicatedBlocksCount,
+ this.corruptReplicaBlocksCount,
+ getMissingBlocksCount()};
}
}
@@ -4393,6 +4431,11 @@
return underReplicatedBlocksCount;
}
+ /** Returns number of blocks with corrupt replicas */
+ public long getCorruptReplicaBlocksCount() {
+ return corruptReplicaBlocksCount;
+ }
+
public long getScheduledReplicationBlocks() {
return scheduledReplicationBlocksCount;
}
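
The missing-block count above is kept in two counters that roll over each
time the replication monitor's scan of neededReplications wraps back to the
start; getMissingBlocksCount() returns the max of the two so the reported
number does not dip toward zero while the current sweep is still partial. A
standalone sketch of the same pattern, with hypothetical names:

    // Illustrative rolling counter, not the FSNamesystem code itself.
    class RollingCounter {
      private long cur, prev;
      void onSweepWrap() { prev = cur; cur = 0; } // scan restarted from index 0
      void onHit()       { cur++; }               // saw a missing block
      long get()         { return Math.max(prev, cur); } // stable estimate
    }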
Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/JspHelper.java Wed Mar 4 03:13:06 2009
@@ -195,6 +195,17 @@
return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
}
+ public static String getWarningText(FSNamesystem fsn) {
+ // Ideally this should be displayed in RED
+ long missingBlocks = fsn.getMissingBlocksCount();
+ if (missingBlocks > 0) {
+ return "<br> WARNING :" +
+ " There are about " + missingBlocks +
+ " missing blocks. Please check the log or run fsck. <br><br>";
+ }
+ return "";
+ }
+
public String getInodeLimitText() {
long inodes = fsn.dir.totalInodes();
long blocks = fsn.getBlocksTotal();
Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java Wed Mar 4 03:13:06 2009
@@ -79,7 +79,7 @@
if (decommissionedReplicas > 0) {
return 0;
}
- return LEVEL; // no need to replicate
+ return 2; // keep these blocks in the needed-replication queue.
} else if(curReplicas==1) {
return 0; // highest priority
} else if(curReplicas*3<expectedReplicas) {
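
The one-line change above is the crux of the fix: a block with zero live and
zero decommissioned replicas used to get priority LEVEL, which falls outside
every priority queue, so such blocks silently dropped out of
neededReplications. Returning 2 keeps them queued, which is what lets the
replication monitor keep visiting and counting them. A condensed view of the
resulting priority ladder; the branches not shown in the diff are
reconstructed and marked as assumed:

    // Condensed sketch of getPriority(); "assumed" branches are not quoted
    // from the diff.
    int getPriority(int curReplicas, int decommissioned, int expected) {
      if (curReplicas < 0 || curReplicas >= expected)
        return LEVEL;                  // assumed: healthy, not queued at all
      if (curReplicas == 0)
        return decommissioned > 0 ? 0  // only decommissioned copies: urgent
                                  : 2; // missing: stay queued and be counted
      if (curReplicas == 1)
        return 0;                      // single live replica: highest priority
      if (curReplicas * 3 < expected)
        return 1;                      // assumed: badly under-replicated
      return 2;                        // assumed: mildly under-replicated
    }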
Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java Wed Mar 4 03:13:06 2009
@@ -55,6 +55,7 @@
public MetricsIntValue pendingReplicationBlocks = new MetricsIntValue("PendingReplicationBlocks", registry);
public MetricsIntValue underReplicatedBlocks = new MetricsIntValue("UnderReplicatedBlocks", registry);
public MetricsIntValue scheduledReplicationBlocks = new MetricsIntValue("ScheduledReplicationBlocks", registry);
+ public MetricsIntValue missingBlocks = new MetricsIntValue("MissingBlocks", registry);
public FSNamesystemMetrics(Configuration conf) {
String sessionId = conf.get("session.id");
@@ -104,6 +105,7 @@
underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks());
scheduledReplicationBlocks.set((int)fsNameSystem.
getScheduledReplicationBlocks());
+ missingBlocks.set((int)fsNameSystem.getMissingBlocksCount());
for (MetricsBase m : registry.getMetricsList()) {
m.pushMetric(metricsRecord);
Modified: hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/core/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java Wed Mar 4 03:13:06 2009
@@ -286,6 +286,19 @@
System.out.println("DFS Used%: "
+ StringUtils.limitDecimalTo2(((1.0 * used) /
presentCapacity) * 100)
+ "%");
+
+ /* These counts are not always up to date. They are updated after an
+ * iteration over an internal list, so they should be current within a few
+ * seconds to minutes. Use "-metaSave" to list all such blocks and get
+ * accurate counts.
+ */
+ System.out.println("Under replicated blocks: " +
+ dfs.getUnderReplicatedBlocksCount());
+ System.out.println("Blocks with corrupt replicas: " +
+ dfs.getCorruptBlocksCount());
+ System.out.println("Missing blocks: " +
+ dfs.getMissingBlocksCount());
+
System.out.println();
System.out.println("-------------------------------------------------");
Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Mar 4 03:13:06 2009
@@ -19,9 +19,12 @@
package org.apache.hadoop.hdfs;
import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
+import java.net.URL;
+import java.net.URLConnection;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
@@ -256,4 +259,12 @@
in.close();
return b.toString();
}
+
+ // Returns the content at the given URL as a string.
+ public static String urlGet(URL url) throws IOException {
+ URLConnection conn = url.openConnection();
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+ return out.toString();
+ }
}
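
Unlike the private copy it replaces in TestDatanodeBlockScanner below, this
version propagates IOException, so callers fail loudly on a bad fetch. A
hedged sketch of how a test such as the new TestMissingBlocksAlert might use
it against the NameNode web UI; the port and the marker string are
illustrative assumptions:

    // Sketch: 50070 and the "WARNING" marker are assumptions, not quoted code.
    URL dfshealth = new URL("http://localhost:50070/dfshealth.jsp");
    String page = DFSTestUtil.urlGet(dfshealth);
    boolean alertShown = page.contains("WARNING");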
Modified: hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/core/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Wed Mar 4 03:13:06 2009
@@ -18,11 +18,9 @@
package org.apache.hadoop.hdfs;
-import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URL;
-import java.net.URLConnection;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.io.*;
@@ -50,19 +48,6 @@
private static final Log LOG =
LogFactory.getLog(TestDatanodeBlockScanner.class);
- private static String urlGet(URL url) {
- try {
- URLConnection conn = url.openConnection();
- ByteArrayOutputStream out = new ByteArrayOutputStream();
- IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
- return out.toString();
- } catch (IOException e) {
- LOG.warn("Failed to fetch " + url.toString() + " : " +
- e.getMessage());
- }
- return "";
- }
-
private static Pattern pattern =
Pattern.compile(".*?(blk_[-]*\\d+).*?scan time\\s*:\\s*(\\d+)");
/**
@@ -79,7 +64,7 @@
String block = DFSTestUtil.getFirstBlock(fs, file).getBlockName();
while (verificationTime <= 0) {
- String response = urlGet(url);
+ String response = DFSTestUtil.urlGet(url);
for(Matcher matcher = pattern.matcher(response); matcher.find();) {
if (block.equals(matcher.group(1))) {
verificationTime = Long.parseLong(matcher.group(2));
Modified: hadoop/core/branches/branch-0.20/src/webapps/hdfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/webapps/hdfs/dfshealth.jsp?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/webapps/hdfs/dfshealth.jsp (original)
+++ hadoop/core/branches/branch-0.20/src/webapps/hdfs/dfshealth.jsp Wed Mar 4 03:13:06 2009
@@ -251,6 +251,8 @@
<h3>Cluster Summary</h3>
<b> <%= jspHelper.getSafeModeText()%> </b>
<b> <%= jspHelper.getInodeLimitText()%> </b>
+<a class="warning"> <%= JspHelper.getWarningText(fsn)%></a>
+
<%
generateDFSHealthReport(out, nn, request);
%>
Modified: hadoop/core/branches/branch-0.20/src/webapps/static/hadoop.css
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.20/src/webapps/static/hadoop.css?rev=749888&r1=749887&r2=749888&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.20/src/webapps/static/hadoop.css (original)
+++ hadoop/core/branches/branch-0.20/src/webapps/static/hadoop.css Wed Mar 4 03:13:06 2009
@@ -76,6 +76,11 @@
background-color : lightyellow;
}
+.warning {
+ font-weight : bolder;
+ color : red;
+}
+
div#dfstable table {
white-space : pre;
}