Repository: hadoop
Updated Branches:
refs/heads/branch-2 145058003 -> 30791cb0d
HDFS-9229. Expose size of NameNode directory as a metric. Contributed by Surendra Singh Lilhore.
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
Change-Id: I5f5895bac4f7d7e66e95788765a514b80b9b6766
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30791cb0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30791cb0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30791cb0
Branch: refs/heads/branch-2
Commit: 30791cb0d038dea03cc740c0a84d23519c4e1c6a
Parents: 1450580
Author: Zhe Zhang <[email protected]>
Authored: Thu Oct 29 11:14:00 2015 -0700
Committer: Zhe Zhang <[email protected]>
Committed: Thu Oct 29 11:21:57 2015 -0700
----------------------------------------------------------------------
.../hadoop-common/src/site/markdown/Metrics.md | 1 +
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
.../hadoop/hdfs/server/common/Storage.java | 15 +++++
.../hadoop/hdfs/server/namenode/FSImage.java | 4 ++
.../hdfs/server/namenode/FSNamesystem.java | 5 ++
.../hadoop/hdfs/server/namenode/NNStorage.java | 25 ++++++++
.../hdfs/server/namenode/NameNodeMXBean.java | 5 ++
.../hdfs/server/namenode/ha/EditLogTailer.java | 3 +-
.../server/namenode/TestNameNodeMXBean.java | 62 +++++++++++++++++++-
9 files changed, 121 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30791cb0/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index c0849e6..b684062 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -240,6 +240,7 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
| `LockQueueLength` | Number of threads waiting to acquire FSNameSystem lock |
| `TotalSyncCount` | Total number of sync operations performed by edit log |
| `TotalSyncTimes` | Total number of milliseconds spent by various edit logs in sync operation|
+| `NameDirSize` | NameNode name directories size in bytes |
JournalNode
-----------
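The new NameDirSize value is published through the NameNodeInfo MBean (Hadoop:service=NameNode,name=NameNodeInfo), the same bean TestNameNodeMXBean queries. The following is a minimal, illustrative sketch of reading and decoding it from inside a JVM that hosts a NameNode (for example a MiniDFSCluster test); the directory paths and sizes in the comments are invented:

    import java.lang.management.ManagementFactory;
    import java.util.Map;

    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    import org.mortbay.util.ajax.JSON;

    public class NameDirSizeReader {
      public static void main(String[] args) throws Exception {
        // Only meaningful inside a JVM that hosts a NameNode (e.g. a MiniDFSCluster test).
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName nnInfo = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");

        // NameDirSize is a JSON string such as
        // {"/data/1/dfs/name":1253376,"/data/2/dfs/name":1253376} (values invented).
        String json = (String) mbs.getAttribute(nnInfo, "NameDirSize");

        @SuppressWarnings("unchecked")
        Map<String, Long> dirSizes = (Map<String, Long>) JSON.parse(json);
        for (Map.Entry<String, Long> e : dirSizes.entrySet()) {
          System.out.println(e.getKey() + " -> " + e.getValue() + " bytes");
        }
      }
    }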
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30791cb0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8251477..1399409 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -769,6 +769,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8545. Refactor FS#getUsed() to use ContentSummary and add an API to fetch
the total file length from a specific path (J.Andreina via vinayakumarb)
+ HDFS-9229. Expose size of NameNode directory as a metric.
+ (Surendra Singh Lilhore via zhz)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30791cb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index d2993fc..4dc6553 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -30,6 +30,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Properties;
+import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -310,6 +311,20 @@ public abstract class Storage extends StorageInfo {
return dirType;
}
+ /**
+ * Get the size of this storage directory in bytes.
+ * Returns 0 if the directory is shared, missing, or its size cannot be determined.
+ */
+ public long getDirectorySize() {
+ try {
+ if (!isShared() && root != null && root.exists()) {
+ return FileUtils.sizeOfDirectory(root);
+ }
+ } catch (Exception e) {
+ LOG.warn("Failed to get directory size: " + root, e);
+ }
+ return 0;
+ }
+
public void read(File from, Storage storage) throws IOException {
Properties props = readPropertiesFile(from);
storage.setFieldsFromProperties(props, this);
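For context, getDirectorySize() above delegates the actual measurement to commons-io: FileUtils.sizeOfDirectory(File) walks the directory tree and sums the lengths of all regular files under the given root. A small standalone sketch of that behaviour; the file names below merely imitate NameNode artifacts and are not produced by this change:

    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.apache.commons.io.FileUtils;

    public class DirectorySizeDemo {
      public static void main(String[] args) throws Exception {
        // Create a throwaway directory with two small files in it.
        Path dir = Files.createTempDirectory("nn-dir-size-demo");
        Files.write(dir.resolve("fsimage_0000000000000000000"), new byte[1024]);
        Files.write(dir.resolve("edits_inprogress_0000000000000000001"), new byte[512]);

        // sizeOfDirectory() walks the tree and sums the lengths of all regular files.
        long size = FileUtils.sizeOfDirectory(dir.toFile());
        System.out.println(dir + " -> " + size + " bytes"); // expected: 1536

        FileUtils.deleteDirectory(dir.toFile());
      }
    }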
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30791cb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 23adaf5..bfbf3ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -1044,6 +1044,8 @@ public class FSImage implements Closeable {
} finally {
removeFromCheckpointing(imageTxId);
}
+ //Update NameDirSize Metric
+ getStorage().updateNameDirSize();
}
/**
@@ -1224,6 +1226,8 @@ public class FSImage implements Closeable {
// we won't miss this log segment on a restart if the edits directories
// go missing.
storage.writeTransactionIdFileToStorage(getEditLog().getCurSegmentTxId());
+ //Update NameDirSize Metric
+ getStorage().updateNameDirSize();
return new CheckpointSignature(this);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30791cb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 12a0369..72d6dfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6328,6 +6328,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
return VersionInfo.getVersion();
}
+ @Override // NameNodeMXBean
+ public String getNameDirSize() {
+ return getFSImage().getStorage().getNNDirectorySize();
+ }
+
/**
* Verifies that the given identifier and password are valid and match.
* @param identifier Token identifier.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30791cb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index d872c03..9b63e72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -52,6 +53,7 @@ import org.apache.hadoop.hdfs.util.PersistentLongFile;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.util.Time;
+import org.mortbay.util.ajax.JSON;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -149,6 +151,11 @@ public class NNStorage extends Storage implements Closeable,
private HashMap<String, String> deprecatedProperties;
/**
+ * Name directories size for metric.
+ */
+ private Map<String, Long> nameDirSizeMap = new HashMap<>();
+
+ /**
* Construct the NNStorage.
* @param conf Namenode configuration.
* @param imageDirs Directories the image can be stored in.
@@ -166,6 +173,8 @@ public class NNStorage extends Storage implements Closeable,
setStorageDirectories(imageDirs,
Lists.newArrayList(editsDirs),
FSNamesystem.getSharedEditsDirs(conf));
+ //Update NameDirSize metric value after NN start
+ updateNameDirSize();
}
@Override // Storage
@@ -1075,4 +1084,20 @@ public class NNStorage extends Storage implements Closeable,
getBlockPoolID(),
getCTime());
}
+
+ public String getNNDirectorySize() {
+ return JSON.toString(nameDirSizeMap);
+ }
+
+ public void updateNameDirSize() {
+ Map<String, Long> nnDirSizeMap = new HashMap<>();
+ for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
+ StorageDirectory sd = it.next();
+ if (!sd.isShared()) {
+ nnDirSizeMap.put(sd.getRoot().getAbsolutePath(), sd.getDirectorySize());
+ }
+ }
+ nameDirSizeMap.clear();
+ nameDirSizeMap.putAll(nnDirSizeMap);
+ }
}
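The metric value itself is simply nameDirSizeMap serialized with Jetty's org.mortbay.util.ajax.JSON, i.e. one JSON object keyed by absolute directory path. A small round-trip illustration with invented paths and sizes, mirroring how TestNameNodeMXBean parses the string back:

    import java.util.HashMap;
    import java.util.Map;

    import org.mortbay.util.ajax.JSON;

    public class NameDirSizeJsonDemo {
      public static void main(String[] args) {
        Map<String, Long> sizes = new HashMap<>();
        sizes.put("/data/1/dfs/name", 1253376L);
        sizes.put("/data/2/dfs/name", 1253376L);

        // This is essentially what getNNDirectorySize() returns:
        String json = JSON.toString(sizes);
        System.out.println(json);

        // And this is how TestNameNodeMXBean turns the string back into a map:
        @SuppressWarnings("unchecked")
        Map<String, Long> parsed = (Map<String, Long>) JSON.parse(json);
        System.out.println(parsed.get("/data/1/dfs/name")); // 1253376
      }
    }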
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30791cb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
index 1e9933f..e2bca1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
@@ -281,4 +281,9 @@ public interface NameNodeMXBean {
*/
public Map<String, Integer> getDistinctVersions();
+ /**
+ * Get the size of the NameNode name directories, as a JSON string that
+ * maps each storage directory path to its size in bytes.
+ */
+ String getNameDirSize();
+
}
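Since getNameDirSize() is part of NameNodeMXBean, the value can also be read from outside the process via the NameNode's /jmx servlet. The sketch below polls that servlet over HTTP; the host and port (50070 is the default NameNode HTTP port on branch-2) are assumptions and should be adjusted for a real cluster:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class NameDirSizeHttpProbe {
      public static void main(String[] args) throws Exception {
        // Default NameNode HTTP address on branch-2; adjust host and port as needed.
        URL url = new URL(
            "http://localhost:50070/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");

        StringBuilder body = new StringBuilder();
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            body.append(line).append('\n');
          }
        }
        // The response is a JSON document whose "beans" array contains the NameNodeInfo
        // bean; its "NameDirSize" field carries the per-directory sizes as a nested
        // JSON string, just like the in-process example above.
        System.out.println(body);
      }
    }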
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30791cb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 6ac3656..5730de4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -343,6 +343,8 @@ public class EditLogTailer {
} finally {
namesystem.cpUnlock();
}
+ //Update NameDirSize Metric
+ namesystem.getFSImage().getStorage().updateNameDirSize();
} catch (EditLogInputException elie) {
LOG.warn("Error while reading edits from disk. Will try again.",
elie);
} catch (InterruptedException ie) {
@@ -362,5 +364,4 @@ public class EditLogTailer {
}
}
}
-
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/30791cb0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 559aae6..463ca67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -26,12 +26,16 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
+import org.apache.hadoop.net.ServerSocketUtil;
import org.apache.hadoop.util.VersionInfo;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.Test;
@@ -40,6 +44,7 @@ import org.mortbay.util.ajax.JSON;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.io.File;
+import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.URI;
import java.util.Collection;
@@ -186,7 +191,7 @@ public class TestNameNodeMXBean {
}
assertEquals(2, statusMap.get("active").size());
assertEquals(0, statusMap.get("failed").size());
-
+
// This will cause the first dir to fail.
File failedNameDir = new File(nameDirUris.iterator().next());
assertEquals(0, FileUtil.chmod(
@@ -412,4 +417,59 @@ public class TestNameNodeMXBean {
}
}
}
+
+ @Test(timeout = 120000)
+ public void testNNDirectorySize() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+ // Have to specify IPC ports so the NNs can talk to each other.
+ MiniDFSNNTopology topology = new MiniDFSNNTopology()
+ .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+ .addNN(new MiniDFSNNTopology.NNConf("nn1")
+ .setIpcPort(ServerSocketUtil.getPort(0, 100)))
+ .addNN(new MiniDFSNNTopology.NNConf("nn2")
+ .setIpcPort(ServerSocketUtil.getPort(0, 100))));
+
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+ .nnTopology(topology).numDataNodes(0)
+ .build();
+ FileSystem fs = null;
+ try {
+ cluster.waitActive();
+
+ FSNamesystem nn0 = cluster.getNamesystem(0);
+ FSNamesystem nn1 = cluster.getNamesystem(1);
+ checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
+ checkNNDirSize(cluster.getNameDirs(1), nn1.getNameDirSize());
+ cluster.transitionToActive(0);
+ fs = cluster.getFileSystem(0);
+ DFSTestUtil.createFile(fs, new Path("/file"), 0, (short) 1, 0L);
+
+ //rollEditLog
+ HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
+ cluster.getNameNode(1));
+ checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
+ checkNNDirSize(cluster.getNameDirs(1), nn1.getNameDirSize());
+
+ //Test metric after call saveNamespace
+ DFSTestUtil.createFile(fs, new Path("/file"), 0, (short) 1, 0L);
+ nn0.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ nn0.saveNamespace();
+ checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private void checkNNDirSize(Collection<URI> nameDirUris, String metric) {
+ Map<String, Long> nnDirMap =
+ (Map<String, Long>) JSON.parse(metric);
+ assertEquals(nameDirUris.size(), nnDirMap.size());
+ for (URI dirUrl : nameDirUris) {
+ File dir = new File(dirUrl);
+ assertEquals(nnDirMap.get(dir.getAbsolutePath()).longValue(),
+ FileUtils.sizeOfDirectory(dir));
+ }
+ }
}