Author: szetszwo
Date: Mon Feb 2 19:24:48 2009
New Revision: 740084
URL: http://svn.apache.org/viewvc?rev=740084&view=rev
Log:
HADOOP-4368. Implement df in FsShell to show the status of a FileSystem.
(Craig Macdonald via szetszwo)
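For quick reference, the new command prints a df-like table per partition. A sketch of an invocation (the byte counts are illustrative only; the column layout is taken from the printf format added to FsShell.java below):

    $ hadoop fs -df /
    Filesystem		Size	Used	Avail	Use%
    /			98304	8192	90112	8%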
Added:
hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsStatus.java
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/c++/libhdfs/hdfs.c
hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java
hadoop/core/trunk/src/core/org/apache/hadoop/fs/FilterFileSystem.java
hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShell.java
hadoop/core/trunk/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
hadoop/core/trunk/src/test/org/apache/hadoop/fs/FileSystemContractBaseTest.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp
Modified: hadoop/core/trunk/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Feb 2 19:24:48 2009
@@ -29,6 +29,9 @@
HADOOP-3953. Implement sticky bit for directories in HDFS. (Jakob Homan
via szetszwo)
+ HADOOP-4368. Implement df in FsShell to show the status of a FileSystem.
+ (Craig Macdonald via szetszwo)
+
IMPROVEMENTS
HADOOP-4936. Improvements to TestSafeMode. (shv)
Modified: hadoop/core/trunk/src/c++/libhdfs/hdfs.c
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/c%2B%2B/libhdfs/hdfs.c?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/c++/libhdfs/hdfs.c (original)
+++ hadoop/core/trunk/src/c++/libhdfs/hdfs.c Mon Feb 2 19:24:48 2009
@@ -25,6 +25,7 @@
#define HADOOP_PATH "org/apache/hadoop/fs/Path"
#define HADOOP_LOCALFS "org/apache/hadoop/fs/LocalFileSystem"
#define HADOOP_FS "org/apache/hadoop/fs/FileSystem"
+#define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
#define HADOOP_BLK_LOC "org/apache/hadoop/fs/BlockLocation"
#define HADOOP_DFS "org/apache/hadoop/hdfs/DistributedFileSystem"
#define HADOOP_ISTRM "org/apache/hadoop/fs/FSDataInputStream"
@@ -1678,7 +1679,8 @@
tOffset hdfsGetCapacity(hdfsFS fs)
{
// JAVA EQUIVALENT:
- // fs.getRawCapacity();
+ // FsStatus fss = fs.getStatus();
+ // return fss.getCapacity();
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
@@ -1689,23 +1691,22 @@
jobject jFS = (jobject)fs;
- if (!((*env)->IsInstanceOf(env, jFS,
- globalClassReference(HADOOP_DFS, env)))) {
- fprintf(stderr, "hdfsGetCapacity works only on a "
- "DistributedFileSystem!\n");
- return -1;
- }
-
- //FileSystem::getRawCapacity()
+ //FileSystem::getStatus
jvalue jVal;
jthrowable jExc = NULL;
- if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
- "getRawCapacity", "()J") != 0) {
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
+ "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;") != 0) {
errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
- "FileSystem::getRawCapacity");
+ "FileSystem::getStatus");
+ return -1;
+ }
+ jobject fss = (jobject)jVal.l;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, fss, HADOOP_FSSTATUS,
+ "getCapacity", "()J") != 0) {
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FsStatus::getCapacity");
return -1;
}
-
return jVal.j;
}
@@ -1714,7 +1715,8 @@
tOffset hdfsGetUsed(hdfsFS fs)
{
// JAVA EQUIVALENT:
- // fs.getRawUsed();
+ // FsStatus fss = fs.getStatus();
+ // return fss.getUsed();
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
@@ -1725,24 +1727,24 @@
jobject jFS = (jobject)fs;
- if (!((*env)->IsInstanceOf(env, jFS,
- globalClassReference(HADOOP_DFS, env)))) {
- fprintf(stderr, "hdfsGetUsed works only on a "
- "DistributedFileSystem!\n");
+ //FileSystem::getStatus
+ jvalue jVal;
+ jthrowable jExc = NULL;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
+ "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;") != 0) {
+ errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
+ "FileSystem::getStatus");
return -1;
}
-
- //FileSystem::getRawUsed()
- jvalue jVal;
- jthrowable jExc = NULL;
- if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
- "getRawUsed", "()J") != 0) {
+ jobject fss = (jobject)jVal.l;
+ if (invokeMethod(env, &jVal, &jExc, INSTANCE, fss, HADOOP_FSSTATUS,
+ "getUsed", "()J") != 0) {
errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
- "FileSystem::getRawUsed");
+ "FsStatus::getUsed");
return -1;
}
-
return jVal.j;
+
}
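Both hunks above drop the DistributedFileSystem-only reflection (getRawCapacity/getRawUsed) in favor of calls available on any FileSystem. In Java terms, the sequence the JNI now performs via invokeMethod is simply the following (a sketch restating the JAVA EQUIVALENT comments; the class name is for illustration only):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsStatus;

    class LibhdfsJavaEquivalent {
      // hdfsGetCapacity: FileSystem::getStatus, then FsStatus::getCapacity
      static long capacity(FileSystem fs) throws IOException {
        FsStatus fss = fs.getStatus();
        return fss.getCapacity();
      }
      // hdfsGetUsed: FileSystem::getStatus, then FsStatus::getUsed
      static long used(FileSystem fs) throws IOException {
        FsStatus fss = fs.getStatus();
        return fss.getUsed();
      }
    }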
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/FileSystem.java Mon Feb 2 19:24:48 2009
@@ -1282,6 +1282,34 @@
}
return results.toArray(new FileStatus[results.size()]);
}
+
+ /**
+ * Returns a status object describing the use and capacity of the
+ * file system. If the file system has multiple partitions, the
+ * use and capacity of the root partition is reflected.
+ *
+ * @return a FsStatus object
+ * @throws IOException
+ * see specific implementation
+ */
+ public FsStatus getStatus() throws IOException {
+ return getStatus(null);
+ }
+
+ /**
+ * Returns a status object describing the use and capacity of the
+ * file system. If the file system has multiple partitions, the
+ * use and capacity of the partition pointed to by the specified
+ * path is reflected.
+ * @param p Path for which status should be obtained. null means
+ * the default partition.
+ * @return a FsStatus object
+ * @throws IOException
+ * see specific implementation
+ */
+ public FsStatus getStatus(Path p) throws IOException {
+ return new FsStatus(Long.MAX_VALUE, 0, Long.MAX_VALUE);
+ }
/**
* Set permission of a path.
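The base-class implementation above is a conservative fallback (effectively unlimited capacity, zero used) for filesystems that do not override getStatus. A minimal client sketch against the new API (class name hypothetical; assumes the relevant filesystem configuration is on the classpath):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsStatus;
    import org.apache.hadoop.fs.Path;

    public class DfExample {
      public static void main(String[] args) throws IOException {
        Path p = new Path(args.length > 0 ? args[0] : "/");
        FileSystem fs = p.getFileSystem(new Configuration());
        // a null path would mean the default partition, per the javadoc above
        FsStatus stats = fs.getStatus(p);
        System.out.println("capacity=" + stats.getCapacity()
            + " used=" + stats.getUsed()
            + " remaining=" + stats.getRemaining());
      }
    }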
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/FilterFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/FilterFileSystem.java?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/FilterFileSystem.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/FilterFileSystem.java Mon Feb 2 19:24:48 2009
@@ -171,6 +171,12 @@
public Path getWorkingDirectory() {
return fs.getWorkingDirectory();
}
+
+ /** {@inheritDoc} */
+ @Override
+ public FsStatus getStatus(Path p) throws IOException {
+ return fs.getStatus(p);
+ }
/** {@inheritDoc} */
@Override
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShell.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShell.java?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShell.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsShell.java Mon Feb 2 19:24:48 2009
@@ -637,6 +637,28 @@
}
}
+ /**
+ * Show the size of a partition in the filesystem that contains
+ * the specified <i>path</i>.
+ * @param path a path specifying the source partition. null means /.
+ * @throws IOException
+ */
+ void df(String path) throws IOException {
+ if (path == null) path = "/";
+ final Path srcPath = new Path(path);
+ final FileSystem srcFs = srcPath.getFileSystem(getConf());
+ if (! srcFs.exists(srcPath)) {
+ throw new FileNotFoundException("Cannot access "+srcPath.toString());
+ }
+ final FsStatus stats = srcFs.getStatus(srcPath);
+ final int PercentUsed = (int)(100.0f * (float)stats.getUsed() / (float)stats.getCapacity());
+ System.out.println("Filesystem\t\tSize\tUsed\tAvail\tUse%");
+ System.out.printf("%s\t\t%d\t%d\t%d\t%d%%\n",
+ path,
+ stats.getCapacity(), stats.getUsed(), stats.getRemaining(),
+ PercentUsed);
+ }
+
/**
* Show the size of all files that match the file pattern <i>src</i>
* @param src a file pattern specifying source files
@@ -1236,7 +1258,7 @@
String summary = "hadoop fs is the command to execute fs commands. " +
"The full syntax is: \n\n" +
"hadoop fs [-fs <local | file system URI>] [-conf <configuration
file>]\n\t" +
- "[-D <property=value>] [-ls <path>] [-lsr <path>] [-du <path>]\n\t" +
+ "[-D <property=value>] [-ls <path>] [-lsr <path>] [-df [<path>]] [-du
<path>]\n\t" +
"[-dus <path>] [-mv <src> <dst>] [-cp <src> <dst>] [-rm <src>]\n\t" +
"[-rmr <src>] [-put <localsrc> ... <dst>] [-copyFromLocal <localsrc> ...
<dst>]\n\t" +
"[-moveFromLocal <localsrc> ... <dst>] [" +
@@ -1282,6 +1304,10 @@
"\t\texcept that the data is shown for all the entries in the\n" +
"\t\tsubtree.\n";
+ String df = "-df [<path>]: \tShows the capacity, free and used space of the filesystem.\n"+
+ "\t\tIf the filesystem has multiple partitions, and no path to a particular partition\n"+
+ "\t\tis specified, then the status of the root partition will be shown.\n";
+
String du = "-du <path>: \tShow the amount of space, in bytes, used by the files that \n" +
"\t\tmatch the specified file pattern. Equivalent to the unix\n" +
"\t\tcommand \"du -sb <path>/*\" in case of a directory, \n" +
@@ -1406,6 +1432,8 @@
System.out.println(ls);
} else if ("lsr".equals(cmd)) {
System.out.println(lsr);
+ } else if ("df".equals(cmd)) {
+ System.out.println(df);
} else if ("du".equals(cmd)) {
System.out.println(du);
} else if ("dus".equals(cmd)) {
@@ -1463,6 +1491,7 @@
System.out.println(fs);
System.out.println(ls);
System.out.println(lsr);
+ System.out.println(df);
System.out.println(du);
System.out.println(dus);
System.out.println(mv);
@@ -1512,6 +1541,8 @@
delete(argv[i], false);
} else if ("-rmr".equals(cmd)) {
delete(argv[i], true);
+ } else if ("-df".equals(cmd)) {
+ df(argv[i]);
} else if ("-du".equals(cmd)) {
du(argv[i]);
} else if ("-dus".equals(cmd)) {
@@ -1580,6 +1611,9 @@
"-text".equals(cmd)) {
System.err.println("Usage: java FsShell" +
" [" + cmd + " <path>]");
+ } else if ("-df".equals(cmd) ) {
+ System.err.println("Usage: java FsShell" +
+ " [" + cmd + " [<path>]]");
} else if (Count.matches(cmd)) {
System.err.println(prefix + " [" + Count.USAGE + "]");
} else if ("-mv".equals(cmd) || "-cp".equals(cmd)) {
@@ -1613,6 +1647,7 @@
System.err.println("Usage: java FsShell");
System.err.println(" [-ls <path>]");
System.err.println(" [-lsr <path>]");
+ System.err.println(" [-df [<path>]]");
System.err.println(" [-du <path>]");
System.err.println(" [-dus <path>]");
System.err.println(" [" + Count.USAGE + "]");
@@ -1658,7 +1693,6 @@
int exitCode = -1;
int i = 0;
String cmd = argv[i++];
-
//
// verify that we have enough command line parameters
//
@@ -1688,7 +1722,6 @@
return exitCode;
}
}
-
// initialize FsShell
try {
init();
@@ -1754,6 +1787,12 @@
exitCode = doall(cmd, argv, i);
} else if ("-expunge".equals(cmd)) {
expunge();
+ } else if ("-df".equals(cmd)) {
+ if (argv.length-1 > 0) {
+ exitCode = doall(cmd, argv, i);
+ } else {
+ df(null);
+ }
} else if ("-du".equals(cmd)) {
if (i < argv.length) {
exitCode = doall(cmd, argv, i);
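Note the dispatch above: with one or more path arguments, doall invokes df once per path; with no argument, df(null) falls back to "/". Both forms are therefore accepted (a sketch; paths illustrative):

    $ hadoop fs -df              # no path: reports the root partition "/"
    $ hadoop fs -df /user /tmp   # one report per listed path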
Added: hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsStatus.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsStatus.java?rev=740084&view=auto
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsStatus.java (added)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/FsStatus.java Mon Feb 2 19:24:48 2009
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Writable;
+
+/** This class is used to represent the capacity, free and used space on a
+ * {@link FileSystem}.
+ */
+public class FsStatus implements Writable {
+ private long capacity;
+ private long used;
+ private long remaining;
+
+ /** Construct a FsStatus object, using the specified statistics */
+ public FsStatus(long capacity, long used, long remaining) {
+ this.capacity = capacity;
+ this.used = used;
+ this.remaining = remaining;
+ }
+
+ /** Return the capacity in bytes of the file system */
+ public long getCapacity() {
+ return capacity;
+ }
+
+ /** Return the number of bytes used on the file system */
+ public long getUsed() {
+ return used;
+ }
+
+ /** Return the number of remaining bytes on the file system */
+ public long getRemaining() {
+ return remaining;
+ }
+
+ //////////////////////////////////////////////////
+ // Writable
+ //////////////////////////////////////////////////
+ public void write(DataOutput out) throws IOException {
+ out.writeLong(capacity);
+ out.writeLong(used);
+ out.writeLong(remaining);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ capacity = in.readLong();
+ used = in.readLong();
+ remaining = in.readLong();
+ }
+}
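Since FsStatus is a Writable, it can cross RPC boundaries. A hedged round-trip sketch using Hadoop's in-memory buffers (class name and values illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.FsStatus;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    public class FsStatusRoundTrip {
      public static void main(String[] args) throws IOException {
        FsStatus before = new FsStatus(1000L, 250L, 750L);
        DataOutputBuffer out = new DataOutputBuffer();
        before.write(out);                  // writes capacity, used, remaining

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        FsStatus after = new FsStatus(0, 0, 0);
        after.readFields(in);               // fields restored in the same order
        assert after.getCapacity() == 1000L && after.getUsed() == 250L;
      }
    }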
Modified: hadoop/core/trunk/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java (original)
+++ hadoop/core/trunk/src/core/org/apache/hadoop/fs/RawLocalFileSystem.java Mon Feb 2 19:24:48 2009
@@ -328,6 +328,17 @@
public Path getWorkingDirectory() {
return workingDir;
}
+
+ /** {@inheritDoc} */
+ @Override
+ public FsStatus getStatus(Path p) throws IOException {
+ File partition = pathToFile(p == null ? new Path("/") : p);
+ //File provides getUsableSpace() and getFreeSpace()
+ //File provides no API to obtain used space, assume used = total - free
+ return new FsStatus(partition.getTotalSpace(),
+ partition.getTotalSpace() - partition.getFreeSpace(),
+ partition.getFreeSpace());
+ }
// In the case of the local filesystem, we can just rename the file.
public void moveFromLocalFile(Path src, Path dst) throws IOException {
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Mon Feb 2 19:24:48 2009
@@ -27,7 +27,6 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.conf.*;
-import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
@@ -695,22 +694,9 @@
}
}
- public DiskStatus getDiskStatus() throws IOException {
+ public FsStatus getDiskStatus() throws IOException {
long rawNums[] = namenode.getStats();
- return new DiskStatus(rawNums[0], rawNums[1], rawNums[2]);
- }
- /**
- */
- public long totalRawCapacity() throws IOException {
- long rawNums[] = namenode.getStats();
- return rawNums[0];
- }
-
- /**
- */
- public long totalRawUsed() throws IOException {
- long rawNums[] = namenode.getStats();
- return rawNums[1];
+ return new FsStatus(rawNums[0], rawNums[1], rawNums[2]);
}
public DatanodeInfo[] datanodeReport(DatanodeReportType type)
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java Mon Feb 2 19:24:48 2009
@@ -271,44 +271,52 @@
return dfs;
}
- public static class DiskStatus {
- private long capacity;
- private long dfsUsed;
- private long remaining;
+ /** @deprecated Use {@link org.apache.hadoop.fs.FsStatus} instead */
+ @Deprecated
+ public static class DiskStatus extends FsStatus {
+ public DiskStatus(FsStatus stats) {
+ super(stats.getCapacity(), stats.getUsed(), stats.getRemaining());
+ }
+
public DiskStatus(long capacity, long dfsUsed, long remaining) {
- this.capacity = capacity;
- this.dfsUsed = dfsUsed;
- this.remaining = remaining;
- }
-
- public long getCapacity() {
- return capacity;
+ super(capacity, dfsUsed, remaining);
}
+
public long getDfsUsed() {
- return dfsUsed;
- }
- public long getRemaining() {
- return remaining;
+ return super.getUsed();
}
}
+ /** {@inheritDoc} */
+ public FsStatus getStatus(Path p) throws IOException {
+ return dfs.getDiskStatus();
+ }
/** Return the disk usage of the filesystem, including total capacity,
- * used space, and remaining space */
+ * used space, and remaining space
+ * @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ * instead */
+ @Deprecated
public DiskStatus getDiskStatus() throws IOException {
- return dfs.getDiskStatus();
+ return new DiskStatus(dfs.getDiskStatus());
}
/** Return the total raw capacity of the filesystem, disregarding
- * replication .*/
+ * replication.
+ * @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ * instead */
+ @Deprecated
public long getRawCapacity() throws IOException{
- return dfs.totalRawCapacity();
+ return dfs.getDiskStatus().getCapacity();
}
/** Return the total raw used space in the filesystem, disregarding
- * replication .*/
+ * replication.
+ * @deprecated Use {@link org.apache.hadoop.fs.FileSystem#getStatus()}
+ * instead */
+ @Deprecated
public long getRawUsed() throws IOException{
- return dfs.totalRawUsed();
+ return dfs.getDiskStatus().getUsed();
}
/** Return statistics for each datanode. */
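For existing callers the deprecations amount to a one-line migration. A hedged before/after sketch (helper name hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.fs.FsStatus;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    class DiskStatusMigration {
      static long usedBytes(DistributedFileSystem dfs) throws IOException {
        // Deprecated: long used = dfs.getDiskStatus().getDfsUsed();
        FsStatus stats = dfs.getStatus();  // preferred; works on any FileSystem
        return stats.getUsed();
      }
    }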
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Feb 2 19:24:48 2009
@@ -3232,11 +3232,10 @@
addStoredBlock(block, node, delHintNode );
}
- long[] getStats() throws IOException {
- checkSuperuserPrivilege();
+ long[] getStats() {
synchronized(heartbeats) {
return new long[]
- {getCapacityTotal(), getCapacityUsed(), getCapacityRemaining()};
+ {this.capacityTotal, this.capacityUsed, this.capacityRemaining};
}
}
@@ -3244,18 +3243,14 @@
* Total raw bytes including non-dfs used space.
*/
public long getCapacityTotal() {
- synchronized (heartbeats) {
- return this.capacityTotal;
- }
+ return getStats()[0];
}
/**
* Total used space by data nodes
*/
public long getCapacityUsed() {
- synchronized(heartbeats){
- return this.capacityUsed;
- }
+ return getStats()[1];
}
/**
* Total used space by data nodes as percentage of total capacity
@@ -3284,9 +3279,7 @@
* Total non-used raw bytes.
*/
public long getCapacityRemaining() {
- synchronized (heartbeats) {
- return this.capacityRemaining;
- }
+ return getStats()[2];
}
/**
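The long[] returned by getStats() keeps a fixed index convention: {capacity, used, remaining}, in that order, which DFSClient.getDiskStatus() above and dfshealth.jsp below index into directly. A sketch of that contract (helper name hypothetical):

    import org.apache.hadoop.fs.FsStatus;

    class StatsConvention {
      // stats[0] = getCapacityTotal(), stats[1] = getCapacityUsed(),
      // stats[2] = getCapacityRemaining()
      static FsStatus toFsStatus(long[] stats) {
        return new FsStatus(stats[0], stats[1], stats[2]);
      }
    }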
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Mon Feb 2 19:24:48 2009
@@ -579,7 +579,7 @@
}
/** @inheritDoc */
- public long[] getStats() throws IOException {
+ public long[] getStats() {
return namesystem.getStats();
}
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java Mon Feb 2 19:24:48 2009
@@ -24,7 +24,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
@@ -32,6 +31,7 @@
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.shell.Command;
@@ -260,9 +260,9 @@
public void report() throws IOException {
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem dfs = (DistributedFileSystem) fs;
- DiskStatus ds = dfs.getDiskStatus();
+ FsStatus ds = dfs.getStatus();
long capacity = ds.getCapacity();
- long used = ds.getDfsUsed();
+ long used = ds.getUsed();
long remaining = ds.getRemaining();
long presentCapacity = used + remaining;
boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/fs/FileSystemContractBaseTest.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/fs/FileSystemContractBaseTest.java?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/fs/FileSystemContractBaseTest.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/fs/FileSystemContractBaseTest.java Mon Feb 2 19:24:48 2009
@@ -68,6 +68,15 @@
protected boolean renameSupported() {
return true;
}
+
+ public void testFsStatus() throws Exception {
+ FsStatus fsStatus = fs.getStatus();
+ assertNotNull(fsStatus);
+ //used, free and capacity are non-negative longs
+ assertTrue(fsStatus.getUsed() >= 0);
+ assertTrue(fsStatus.getRemaining() >= 0);
+ assertTrue(fsStatus.getCapacity() >= 0);
+ }
public void testWorkingDirectory() throws Exception {
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java Mon Feb 2 19:24:48 2009
@@ -676,16 +676,12 @@
if (nameNode == null) {
return false;
}
- try {
- long[] sizes = nameNode.getStats();
- boolean isUp = false;
- synchronized (this) {
- isUp = (!nameNode.isInSafeMode() && sizes[0] != 0);
- }
- return isUp;
- } catch (IOException ie) {
- return false;
+ long[] sizes = nameNode.getStats();
+ boolean isUp = false;
+ synchronized (this) {
+ isUp = (!nameNode.isInSafeMode() && sizes[0] != 0);
}
+ return isUp;
}
/**
Modified: hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp
URL:
http://svn.apache.org/viewvc/hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp?rev=740084&r1=740083&r2=740084&view=diff
==============================================================================
--- hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp (original)
+++ hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp Mon Feb 2 19:24:48 2009
@@ -186,13 +186,16 @@
}
counterReset();
-
- long total = fsn.getCapacityTotal();
- long remaining = fsn.getCapacityRemaining();
- long used = fsn.getCapacityUsed();
- long nonDFS = fsn.getCapacityUsedNonDFS();
- float percentUsed = fsn.getCapacityUsedPercent();
- float percentRemaining = fsn.getCapacityRemainingPercent();
+ long[] fsnStats = fsn.getStats();
+ long total = fsnStats[0];
+ long remaining = fsnStats[2];
+ long used = fsnStats[1];
+ long nonDFS = total - remaining - used;
+ nonDFS = nonDFS < 0 ? 0 : nonDFS;
+ float percentUsed = total <= 0
+ ? 0f : ((float)used * 100.0f)/(float)total;
+ float percentRemaining = total <= 0
+ ? 100f : ((float)remaining * 100.0f)/(float)total;
out.print( "<div id=\"dfstable\"> <table>\n" +
rowTxt() + colTxt() + "Configured Capacity" + colTxt() + ":" +
colTxt() +