Author: cutting
Date: Mon Jun 19 12:03:42 2006
New Revision: 415387

URL: http://svn.apache.org/viewvc?rev=415387&view=rev
Log:
Add an HTTP user interface to the namenode, on port 50070.
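At a glance, the patch has the namenode read a web-UI port from configuration and start an embedded status server that hosts the new dfs webapp. A minimal sketch of that wiring, using only names that appear in the hunks below (the StatusHttpServer(name, port, findPort) signature is inferred from its call site in this patch, so treat it as an assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.StatusHttpServer;

    // Sketch only: condenses the wiring this patch adds to the FSNamesystem
    // constructor (see the FSNamesystem.java hunks below). The StatusHttpServer
    // constructor signature is inferred from its use in this patch, not from
    // its own source.
    public class InfoServerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "dfs.info.port" and its 50070 default are added in hadoop-default.xml.
        int infoPort = conf.getInt("dfs.info.port", 50070);
        StatusHttpServer infoServer = new StatusHttpServer("dfs", infoPort, false);
        infoServer.start();   // serves the new src/webapps/dfs webapp
      }
    }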
Added:
    lucene/hadoop/trunk/src/webapps/dfs/
    lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp
    lucene/hadoop/trunk/src/webapps/dfs/index.html
Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/build.xml
    lucene/hadoop/trunk/conf/hadoop-default.xml
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=415387&r1=415386&r2=415387&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Mon Jun 19 12:03:42 2006
@@ -6,9 +6,11 @@
  1. HADOOP-298.  Improved progress reports for CopyFiles utility,
     the distributed file copier.  (omalley via cutting)

- 2. HADOOP-299.  Fix some problems in the TaskTracker, permitting
-    multiple jobs to more easily execute at the same time.
-    (omalley via cutting)
+ 2. HADOOP-299.  Fix the task tracker, permitting multiple jobs to
+    more easily execute at the same time.  (omalley via cutting)
+
+ 3. HADOOP-250.  Add an HTTP user interface to the namenode, running
+    on port 50070.  (Devaraj Das via cutting)

 Release 0.3.2 - 2006-06-09

Modified: lucene/hadoop/trunk/build.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/build.xml?rev=415387&r1=415386&r2=415387&view=diff
==============================================================================
--- lucene/hadoop/trunk/build.xml (original)
+++ lucene/hadoop/trunk/build.xml Mon Jun 19 12:03:42 2006
@@ -66,6 +66,7 @@
   <path id="test.classpath">
     <pathelement location="${test.build.classes}" />
     <pathelement location="${test.src.dir}"/>
+    <pathelement location="${build.dir}"/>
     <path refid="classpath"/>
   </path>

@@ -74,6 +75,7 @@
     <path refid="classpath"/>
     <pathelement location="${test.build.classes}" />
     <pathelement location="${test.src.dir}"/>
+    <pathelement location="${build.dir}"/>
   </path>

 <!-- ====================================================== -->
@@ -85,6 +87,7 @@
     <mkdir dir="${build.src}"/>
     <mkdir dir="${build.webapps}/task/WEB-INF"/>
     <mkdir dir="${build.webapps}/job/WEB-INF"/>
+    <mkdir dir="${build.webapps}/dfs/WEB-INF"/>
     <mkdir dir="${build.examples}"/>
     <mkdir dir="${test.build.dir}"/>

@@ -138,6 +141,13 @@
          webxml="${build.webapps}/job/WEB-INF/web.xml">
    </jsp-compile>

+   <jsp-compile
+    uriroot="${src.webapps}/dfs"
+    outputdir="${build.src}"
+    package="org.apache.hadoop.dfs"
+    webxml="${build.webapps}/dfs/WEB-INF/web.xml">
+   </jsp-compile>
+
    <javac
     encoding="${build.encoding}"
     srcdir="${src.dir};${build.src}"
@@ -253,6 +263,7 @@
            errorProperty="tests.failed" failureProperty="tests.failed">
       <sysproperty key="test.build.data" value="${test.build.data}"/>
       <sysproperty key="test.src.dir" value="${test.src.dir}"/>
+      <sysproperty key="hadoop.log.dir" value="."/>
       <classpath refid="${test.classpath.id}"/>
       <formatter type="plain" />
       <batchtest todir="${test.build.dir}" unless="testcase">

Modified: lucene/hadoop/trunk/conf/hadoop-default.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/conf/hadoop-default.xml?rev=415387&r1=415386&r2=415387&view=diff
==============================================================================
--- lucene/hadoop/trunk/conf/hadoop-default.xml (original)
+++ lucene/hadoop/trunk/conf/hadoop-default.xml Mon Jun 19 12:03:42 2006
@@ -96,6 +96,13 @@
 </property>

 <property>
+  <name>dfs.info.port</name>
+  <value>50070</value>
+  <description>The base port number for the dfs namenode web ui.
+  </description>
+</property>
+
+<property>
   <name>dfs.name.dir</name>
   <value>/tmp/hadoop/dfs/name</value>
   <description>Determines where on the local filesystem the DFS name node
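Because dfs.info.port goes through the standard configuration mechanism, deployments can override the 50070 default in the usual ways. A hypothetical override shown through the Configuration API (the same setInt/getInt calls this patch uses in MiniDFSCluster; a site would more typically set the key in hadoop-site.xml, which Configuration layers over hadoop-default.xml):

    import org.apache.hadoop.conf.Configuration;

    public class InfoPortOverride {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("dfs.info.port", 50080);    // 50080 is an arbitrary example port
        // Reads back 50080; without the override, the hadoop-default.xml value.
        System.out.println(conf.getInt("dfs.info.port", 50070));
      }
    }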
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=415387&r1=415386&r2=415387&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Mon Jun 19 12:03:42 2006
@@ -20,8 +20,10 @@
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.*;
+import org.apache.hadoop.mapred.StatusHttpServer;

 import java.io.*;
+import java.net.InetSocketAddress;
 import java.util.*;

 /***************************************************
@@ -57,6 +59,11 @@
     //
     TreeMap datanodeMap = new TreeMap();

+    //
+    // Stores the set of dead datanodes
+    TreeMap deaddatanodeMap = new TreeMap();
+
     //
     // Keeps a Vector for every named machine.  The Vector contains
     // blocks that have recently been invalidated and are thought to live
@@ -89,6 +96,14 @@
     //
     long totalCapacity = 0, totalRemaining = 0;

+    //
+    // For the HTTP browsing interface
+    //
+    StatusHttpServer infoServer;
+    int infoPort;
+    Date startTime;
+
     //
     Random r = new Random();

@@ -143,17 +158,29 @@
     // HEARTBEAT_RECHECK is how often a datanode sends its hearbeat
     private int heartBeatRecheck;

+    public static FSNamesystem fsNamesystemObject;
+    private String localMachine;
+    private int port;
+
     /**
      * dir is where the filesystem directory state
      * is stored
      */
     public FSNamesystem(File dir, Configuration conf) throws IOException {
+        fsNamesystemObject = this;
+        this.infoPort = conf.getInt("dfs.info.port", 50070);
+        this.infoServer = new StatusHttpServer("dfs", infoPort, false);
+        this.infoServer.start();
+        InetSocketAddress addr = DataNode.createSocketAddr(conf.get("fs.default.name", "local"));
+        this.localMachine = addr.getHostName();
+        this.port = addr.getPort();
         this.dir = new FSDirectory(dir, conf);
         this.hbthread = new Daemon(new HeartbeatMonitor());
         this.lmthread = new Daemon(new LeaseMonitor());
         hbthread.start();
         lmthread.start();
         this.systemStart = System.currentTimeMillis();
+        this.startTime = new Date(systemStart);

         this.maxReplication = conf.getInt("dfs.replication.max", 512);
         this.minReplication = conf.getInt("dfs.replication.min", 1);
@@ -167,6 +194,12 @@
         this.maxReplicationStreams = conf.getInt("dfs.max-repl-streams", 2);
         this.heartBeatRecheck= 1000;
     }
+    /** Return the FSNamesystem object
+     *
+     */
+    public static FSNamesystem getFSNamesystem() {
+        return fsNamesystemObject;
+    }

     /** Close down this filesystem manager.
      * Causes heartbeat and lease daemons to stop; waits briefly for
@@ -177,6 +210,7 @@
             fsRunning = false;
         }
         try {
+            infoServer.stop();
             hbthread.join(3000);
         } catch (InterruptedException ie) {
         } finally {
@@ -1044,7 +1078,7 @@
         }
         // register new datanode
         datanodeMap.put(nodeReg.getStorageID(),
-                        new DatanodeInfo( nodeReg ) );
+                        new DatanodeInfo( nodeReg ) ) ;
         NameNode.stateChangeLog.debug(
             "BLOCK* NameSystem.registerDatanode: "
             + "node registered." );
@@ -1104,6 +1138,7 @@
         long capacityDiff = 0;
         long remainingDiff = 0;
         DatanodeInfo nodeinfo = getDatanode( nodeID );
+        deaddatanodeMap.remove(nodeID.getName());

         if (nodeinfo == null) {
             NameNode.stateChangeLog.debug("BLOCK* NameSystem.gotHeartbeat: "
@@ -1166,6 +1201,7 @@
     private void removeDatanode( DatanodeInfo nodeInfo ) {
         heartbeats.remove(nodeInfo);
         datanodeMap.remove(nodeInfo.getStorageID());
+        deaddatanodeMap.put(nodeInfo.getName(), nodeInfo);
         NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeDatanode: "
                 + nodeInfo.getName() + " is removed from datanodeMap");
         totalCapacity -= nodeInfo.getCapacity();
@@ -1488,6 +1524,38 @@
         return results;
     }
+
+    /** Fill the given vectors with the live and dead datanodes. */
+    public void DFSNodesStatus(Vector live, Vector dead) {
+      synchronized (heartbeats) {
+        synchronized (datanodeMap) {
+          live.addAll(datanodeMap.values());
+          dead.addAll(deaddatanodeMap.values());
+        }
+      }
+    }
+    /** Return the DatanodeInfo registered under the given name. */
+    public DatanodeInfo getDataNodeInfo(String name) {
+        UTF8 src = new UTF8(name);
+        return (DatanodeInfo)datanodeMap.get(src);
+    }
+    /** Return the name of the host this namenode runs on. */
+    public String getDFSNameNodeMachine() {
+        return localMachine;
+    }
+    /** Return the port this namenode listens on. */
+    public int getDFSNameNodePort() {
+        return port;
+    }
+    /** Return the time at which this namenode started. */
+    public Date getStartTime() {
+        return startTime;
+    }

     /////////////////////////////////////////////////////////
     //
     // These methods are called by the Namenode system, to see

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java?rev=415387&r1=415386&r2=415387&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java Mon Jun 19 12:03:42 2006
@@ -94,20 +94,20 @@
     conf.setInt("dfs.replication", 1);
     // this timeout seems to control the minimum time for the test, so
     // decrease it considerably.
-    conf.setInt("ipc.client.timeout", 1000);
+    conf.setInt("ipc.client.timeout", 2000);
     NameNode.format(conf);
     nameNode = new NameNodeRunner();
     nameNodeThread = new Thread(nameNode);
     nameNodeThread.start();
     try {                                     // let namenode get started
-      Thread.sleep(1000);
+      Thread.sleep(2000);
     } catch(InterruptedException e) {
     }
     dataNode = new DataNodeRunner();
     dataNodeThread = new Thread(dataNode);
     dataNodeThread.start();
     try {                                     // let daemons get started
-      Thread.sleep(1000);
+      Thread.sleep(2000);
     } catch(InterruptedException e) {
     }
   }
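The generated servlets for this webapp are compiled into the org.apache.hadoop.dfs package (see the jsp-compile stanza in build.xml above), which lets the page call the namesystem directly through the new static getFSNamesystem() accessor instead of going through a servlet-context attribute. A condensed sketch of the access pattern the JSP below relies on, assuming it runs in the same package and process as a live namenode (raw Vector types kept to match the pre-generics code):

    package org.apache.hadoop.dfs;

    import java.util.Vector;

    // Sketch of how dfshealth.jsp pulls cluster state: the FSNamesystem
    // constructor publishes itself in a static field, and DFSNodesStatus()
    // copies out the live and dead node sets while holding the namesystem's
    // heartbeat and datanode-map locks.
    public class HealthReportSketch {
      public static void main(String[] args) {
        // Null unless an FSNamesystem has been constructed in this process.
        FSNamesystem fsn = FSNamesystem.getFSNamesystem();
        Vector live = new Vector();
        Vector dead = new Vector();
        fsn.DFSNodesStatus(live, dead);
        System.out.println(live.size() + " live, " + dead.size() + " dead datanodes");
      }
    }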
Added: lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp?rev=415387&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp (added)
+++ lucene/hadoop/trunk/src/webapps/dfs/dfshealth.jsp Mon Jun 19 12:03:42 2006
@@ -0,0 +1,110 @@
+<%@ page
+  contentType="text/html; charset=UTF-8"
+  import="javax.servlet.*"
+  import="javax.servlet.http.*"
+  import="java.io.*"
+  import="java.util.*"
+  import="org.apache.hadoop.dfs.*"
+  import="java.text.DateFormat"
+%>
+<%!
+  FSNamesystem fsn = FSNamesystem.getFSNamesystem();
+  String namenodeLabel = fsn.getDFSNameNodeMachine() + ":" + fsn.getDFSNameNodePort();
+
+  public void generateLiveNodeData(JspWriter out, DatanodeInfo d)
+    throws IOException {
+    long c = d.getCapacity();
+    long r = d.getRemaining();
+    long u = c - r;
+    String cGb = DFSShell.limitDecimal((1.0 * c)/(1024*1024*1024), 2);
+    String uGb = DFSShell.limitDecimal((1.0 * u)/(1024*1024*1024), 2);
+    String percentUsed = DFSShell.limitDecimal(((1.0 * u)/c)*100, 2);
+    out.print("<td style=\"vertical-align: top;\"> <b>" +
+              d.getName().toString() +
+              "</b> <br><i><b>LastContact:</b>" +
+              new Date(d.lastUpdate()) + "; ");
+    out.print("<b>Total raw bytes:</b> " + c + "(" + cGb + " GB); ");
+    out.print("<b>Percent used:</b> " + percentUsed);
+    out.print("</i></td>");
+  }
+
+  public void generateDFSHealthReport(JspWriter out) throws IOException {
+    Vector live = new Vector();
+    Vector dead = new Vector();
+    fsn.DFSNodesStatus(live, dead);
+    if (live.isEmpty() && dead.isEmpty()) {
+      out.print("There are no datanodes in the cluster");
+    }
+    else {
+      out.print("<table style=\"width: 100%; text-align: left;\" border=\"1\""+
+                " cellpadding=\"2\" cellspacing=\"2\">");
+      out.print("<tbody>");
+      out.print("<tr>");
+      out.print("<td style=\"vertical-align: top;\"><B>Live Nodes</B><br></td>");
+      out.print("<td style=\"vertical-align: top;\"><B>Dead Nodes</B><br></td>");
+      out.print("</tr>");
+      int i = 0;
+      int min = (live.size() > dead.size()) ? dead.size() : live.size();
+      int max = (live.size() > dead.size()) ? live.size() : dead.size();
+      for (i = 0; i < min; i++) {
+        DatanodeInfo l = (DatanodeInfo)live.elementAt(i);
+        DatanodeInfo d = (DatanodeInfo)dead.elementAt(i);
+        out.print("<tr>");
+        generateLiveNodeData(out, l);
+        out.print("<td style=\"vertical-align: top;\">" +
+                  d.getName().toString() +
+                  "<br></td>");
+        out.print("</tr>");
+      }
+      int type = (live.size() > dead.size()) ? 1 : 0;
+      for (i = min; i < max; i++) {
+        out.print("<tr>");
+        if (type == 1) {
+          DatanodeInfo l = (DatanodeInfo)live.elementAt(i);
+          generateLiveNodeData(out, l);
+          out.print("<td style=\"vertical-align: top;\"><br></td>");
+        }
+        else if (type == 0) {
+          DatanodeInfo d = (DatanodeInfo)dead.elementAt(i);
+          out.print("<td style=\"vertical-align: top;\"><br></td>");
+          out.print("<td style=\"vertical-align: top;\">" +
+                    d.getName().toString() +
+                    "<br></td>");
+        }
+        out.print("</tr>");
+      }
+      out.print("</tbody></table>");
+    }
+  }
+  public String totalCapacity() {
+    return fsn.totalCapacity() + "(" +
+      DFSShell.limitDecimal((1.0 * fsn.totalCapacity())/(1024*1024*1024), 2) + " GB)";
+  }
+  public String totalRemaining() {
+    return fsn.totalRemaining() + "(" +
+      DFSShell.limitDecimal((1.0 * fsn.totalRemaining())/(1024*1024*1024), 2) + " GB)";
+  }
+%>

+<html>
+
+<title>Hadoop DFS Health/Status</title>
+
+<body>
+<h1>NameNode '<%=namenodeLabel%>'</h1>
+
+This NameNode has been up since <%= fsn.getStartTime()%>.<br>
+<hr>
+<h2>Cluster Summary</h2>
+The capacity of this cluster is <%= totalCapacity()%> and remaining is <%= totalRemaining()%>.
+<%
+  generateDFSHealthReport(out);
+%>
+<hr>
+
+<h2>Local logs</h2>
+<a href="/logs/">Log</a> directory
+
+<hr>
+<a href="http://lucene.apache.org/hadoop">Hadoop</a>, 2006.<br>
+</body>
+</html>
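The page reports sizes in raw bytes and in GB; the conversion pattern used throughout is division by 1024^3, multiplied by 1.0 first to force floating-point arithmetic, then trimmed by DFSShell.limitDecimal (assumed here to trim to the given number of decimal places; its implementation is not part of this patch). A self-contained equivalent:

    import java.text.DecimalFormat;

    // Self-contained equivalent of the JSP's byte-to-GB formatting.
    // DecimalFormat stands in for DFSShell.limitDecimal, whose exact rounding
    // behavior is an assumption here.
    public class GbFormatSketch {
      static String toGb(long bytes) {
        double gb = (1.0 * bytes) / (1024L * 1024 * 1024); // 1.0 * avoids integer division
        return new DecimalFormat("0.00").format(gb);
      }
      public static void main(String[] args) {
        System.out.println(toGb(123456789012L)); // prints roughly 114.98
      }
    }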
Added: lucene/hadoop/trunk/src/webapps/dfs/index.html
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/dfs/index.html?rev=415387&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/webapps/dfs/index.html (added)
+++ lucene/hadoop/trunk/src/webapps/dfs/index.html Mon Jun 19 12:03:42 2006
@@ -0,0 +1,20 @@
+<meta HTTP-EQUIV="REFRESH" content="0;url=dfshealth.jsp"/>
+<html>
+
+<head>
+<title>Hadoop Administration</title>
+</head>
+
+<body>
+
+<h1>Hadoop Administration</h1>
+
+<ul>
+
+<li><a href="dfshealth.jsp">DFS Health/Status</a></li>
+
+</ul>
+
+</body>
+
+</html>
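Browsing to the namenode's info port lands on index.html above, whose meta refresh forwards immediately to dfshealth.jsp. The same page can be fetched non-interactively as a quick smoke test; a minimal sketch ("localhost" is an assumption, substitute the namenode's host and the configured dfs.info.port):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;

    // Fetches the new health page over plain HTTP and echoes it to stdout.
    public class DfsHealthFetch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:50070/dfshealth.jsp");
        BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream()));
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line);
        }
        in.close();
      }
    }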