Author: shv
Date: Mon Jun 15 23:24:51 2009
New Revision: 785025
URL: http://svn.apache.org/viewvc?rev=785025&view=rev
Log:
HADOOP-5897. Add name-node metrics to capture java heap usage. Contributed by
Suresh Srinivas.
Added:
hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/
hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=785025&r1=785024&r2=785025&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Jun 15 23:24:51 2009
@@ -149,6 +149,9 @@
HADOOP-5170. Allows jobs to set max maps/reduces per-node and per-cluster.
(Matei Zaharia via ddas)
+ HADOOP-5897. Add name-node metrics to capture java heap usage.
+ (Suresh Srinivas via shv)
+
IMPROVEMENTS
HADOOP-4565. Added CombineFileInputFormat to use data locality information
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=785025&r1=785024&r2=785025&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Mon Jun 15 23:24:51 2009
@@ -48,16 +48,24 @@
* methods to be called with lock held on {@link FSNamesystem}.
*/
public class BlockManager {
+ // Default initial capacity and load factor of map
+ public static final int DEFAULT_INITIAL_MAP_CAPACITY = 16;
+ public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f;
+
private final FSNamesystem namesystem;
- long pendingReplicationBlocksCount = 0L, corruptReplicaBlocksCount,
- underReplicatedBlocksCount = 0L, scheduledReplicationBlocksCount = 0L;
+ volatile long pendingReplicationBlocksCount = 0L;
+ volatile long corruptReplicaBlocksCount = 0L;
+ volatile long underReplicatedBlocksCount = 0L;
+ volatile long scheduledReplicationBlocksCount = 0L;
+ volatile long excessBlocksCount = 0L;
+ volatile long pendingDeletionBlocksCount = 0L;
//
// Mapping: Block -> { INode, datanodes, self ref }
// Updated only in response to client-sent information.
//
- BlocksMap blocksMap = new BlocksMap();
+ final BlocksMap blocksMap;
//
// Store blocks-->datanodedescriptor(s) map of corrupt replicas
@@ -110,11 +118,17 @@
ReplicationTargetChooser replicator;
BlockManager(FSNamesystem fsn, Configuration conf) throws IOException {
+ this(fsn, conf, DEFAULT_INITIAL_MAP_CAPACITY);
+ }
+
+ BlockManager(FSNamesystem fsn, Configuration conf, int capacity)
+ throws IOException {
namesystem = fsn;
pendingReplications = new PendingReplicationBlocks(
conf.getInt("dfs.replication.pending.timeout.sec",
-1) * 1000L);
setConfigurationParameters(conf);
+ blocksMap = new BlocksMap(capacity, DEFAULT_MAP_LOAD_FACTOR);
}
void setConfigurationParameters(Configuration conf) throws IOException {
@@ -324,8 +338,11 @@
void removeFromInvalidates(String datanodeId, Block block) {
Collection<Block> v = recentInvalidateSets.get(datanodeId);
- if (v != null && v.remove(block) && v.isEmpty()) {
- recentInvalidateSets.remove(datanodeId);
+ if (v != null && v.remove(block)) {
+ pendingDeletionBlocksCount--;
+ if (v.isEmpty()) {
+ recentInvalidateSets.remove(datanodeId);
+ }
}
}
@@ -344,6 +361,7 @@
recentInvalidateSets.put(dn.getStorageID(), invalidateSet);
}
if (invalidateSet.add(b)) {
+ pendingDeletionBlocksCount++;
NameNode.stateChangeLog.info("BLOCK* NameSystem.addToInvalidates: "
+ b.getBlockName() + " is added to invalidSet of " + dn.getName());
}
@@ -366,7 +384,8 @@
*/
private void dumpRecentInvalidateSets(PrintWriter out) {
int size = recentInvalidateSets.values().size();
- out.println("Metasave: Blocks waiting deletion from "+size+" datanodes.");
+ out.println("Metasave: Blocks " + pendingDeletionBlocksCount
+ + " waiting deletion from " + size + " datanodes.");
if (size == 0) {
return;
}
@@ -1101,10 +1120,12 @@
excessBlocks = new TreeSet<Block>();
excessReplicateMap.put(dn.getStorageID(), excessBlocks);
}
- excessBlocks.add(block);
- NameNode.stateChangeLog.debug("BLOCK* NameSystem.chooseExcessReplicates: "
- + "(" + dn.getName() + ", " + block
- + ") is added to excessReplicateMap");
+ if (excessBlocks.add(block)) {
+ excessBlocksCount++;
+ NameNode.stateChangeLog.debug("BLOCK* NameSystem.chooseExcessReplicates:"
+ + " (" + dn.getName() + ", " + block
+ + ") is added to excessReplicateMap");
+ }
}
/**
@@ -1140,11 +1161,13 @@
Collection<Block> excessBlocks = excessReplicateMap.get(node
.getStorageID());
if (excessBlocks != null) {
- excessBlocks.remove(block);
- NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
- + block + " is removed from excessBlocks");
- if (excessBlocks.size() == 0) {
- excessReplicateMap.remove(node.getStorageID());
+ if (excessBlocks.remove(block)) {
+ excessBlocksCount--;
+ NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
+ + block + " is removed from excessBlocks");
+ if (excessBlocks.size() == 0) {
+ excessReplicateMap.remove(node.getStorageID());
+ }
}
}
@@ -1243,12 +1266,7 @@
}
int getActiveBlockCount() {
- int activeBlocks = blocksMap.size();
- for(Iterator<Collection<Block>> it =
- recentInvalidateSets.values().iterator(); it.hasNext();) {
- activeBlocks -= it.next().size();
- }
- return activeBlocks;
+ return blocksMap.size() - (int)pendingDeletionBlocksCount;
}
DatanodeDescriptor[] getNodes(Block block) {
@@ -1312,8 +1330,11 @@
* Remove a datanode from the invalidatesSet
* @param n datanode
*/
- void removeFromInvalidates(DatanodeInfo n) {
- recentInvalidateSets.remove(n.getStorageID());
+ void removeFromInvalidates(String storageID) {
+ Collection<Block> blocks = recentInvalidateSets.remove(storageID);
+ if (blocks != null) {
+ pendingDeletionBlocksCount -= blocks.size();
+ }
}
/**
@@ -1331,7 +1352,7 @@
assert nodeId != null;
DatanodeDescriptor dn = namesystem.getDatanode(nodeId);
if (dn == null) {
- recentInvalidateSets.remove(nodeId);
+ removeFromInvalidates(nodeId);
return 0;
}
@@ -1351,8 +1372,9 @@
}
// If we send everything in this message, remove this node entry
- if (!it.hasNext())
- recentInvalidateSets.remove(nodeId);
+ if (!it.hasNext()) {
+ removeFromInvalidates(nodeId);
+ }
dn.addBlocksToBeInvalidated(blocksToInvalidate);
@@ -1397,4 +1419,14 @@
void removeBlockFromMap(BlockInfo blockInfo) {
blocksMap.removeBlock(blockInfo);
}
+
+ public int getCapacity() {
+ synchronized(namesystem) {
+ return blocksMap.getCapacity();
+ }
+ }
+
+ public float getLoadFactor() {
+ return blocksMap.getLoadFactor();
+ }
}
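
The replication and invalidation counters above are kept as plain volatile longs: they are mutated only while the FSNamesystem lock is held, and the metrics updater reads them without taking that lock. A minimal, self-contained sketch of that pattern, assuming an illustrative class and lock that are not part of the patch:

public class CounterSketch {
  private final Object lock = new Object();          // stand-in for the FSNamesystem lock
  private volatile long pendingDeletionBlocksCount = 0L;

  void addToInvalidates() {
    synchronized (lock) {                            // mutation happens only under the lock
      pendingDeletionBlocksCount++;
    }
  }

  long getPendingDeletionBlocks() {                  // lock-free read, e.g. from a metrics thread
    return pendingDeletionBlocksCount;
  }

  public static void main(String[] args) {
    CounterSketch c = new CounterSketch();
    c.addToInvalidates();
    System.out.println(c.getPendingDeletionBlocks()); // prints 1
  }
}

The volatile qualifier only guarantees visibility of single reads and writes; consistency of the counters with the underlying collections still relies on the namesystem lock, which is why getCapacity() above synchronizes on namesystem.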
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java?rev=785025&r1=785024&r2=785025&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java Mon Jun 15 23:24:51 2009
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
@@ -306,7 +305,20 @@
}
}
- private Map<Block, BlockInfo> map = new HashMap<Block, BlockInfo>();
+ // Used for tracking HashMap capacity growth
+ private int capacity;
+ private final float loadFactor;
+
+ private Map<BlockInfo, BlockInfo> map;
+
+ BlocksMap(int initialCapacity, float loadFactor) {
+ this.capacity = 1;
+ // Capacity is initialized to the next multiple of 2 of initialCapacity
+ while (this.capacity < initialCapacity)
+ this.capacity <<= 1;
+ this.loadFactor = loadFactor;
+ this.map = new HashMap<BlockInfo, BlockInfo>(initialCapacity, loadFactor);
+ }
/**
* Add BlockInfo if mapping does not exist.
@@ -437,4 +449,18 @@
return true;
}
+
+ /** Get the capacity of the HashMap that stores blocks */
+ public int getCapacity() {
+ // Capacity doubles every time the map size reaches the threshold
+ while (map.size() > (int)(capacity * loadFactor)) {
+ capacity <<= 1;
+ }
+ return capacity;
+ }
+
+ /** Get the load factor of the map */
+ public float getLoadFactor() {
+ return loadFactor;
+ }
}
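
getCapacity() above does not read the HashMap's internal table size (which is not exposed); it re-derives it from the entry count by starting at the next power of two at or above the initial capacity and doubling whenever the size crosses capacity * loadFactor, mirroring java.util.HashMap's resize rule. A small sketch of that estimate, with illustrative names, assuming the default 16 initial capacity and 0.75 load factor used elsewhere in the patch:

public class CapacityEstimate {
  static int estimateCapacity(int initialCapacity, float loadFactor, int entries) {
    int capacity = 1;
    while (capacity < initialCapacity) {
      capacity <<= 1;                                // round up to a power of two
    }
    while (entries > (int) (capacity * loadFactor)) {
      capacity <<= 1;                                // double on each simulated resize
    }
    return capacity;
  }

  public static void main(String[] args) {
    // 16 initial buckets, 0.75 load factor, 32 blocks -> estimated capacity 64
    System.out.println(estimateCapacity(16, 0.75f, 32));
  }
}

With the capacity and load factor known, the memory footprint of the blocks map can be approximated from the number of table slots rather than just the number of entries, which is what the new BlockCapacity metric is for.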
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java?rev=785025&r1=785024&r2=785025&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java Mon Jun 15 23:24:51 2009
@@ -61,10 +61,6 @@
"on " + dn.getName() +
" by " + Server.getRemoteIp());
}
- if (NameNode.getNameNodeMetrics() != null) {
- NameNode.getNameNodeMetrics().numBlocksCorrupted.set(
- corruptReplicasMap.size());
- }
}
/**
@@ -75,10 +71,6 @@
void removeFromCorruptReplicasMap(Block blk) {
if (corruptReplicasMap != null) {
corruptReplicasMap.remove(blk);
- if (NameNode.getNameNodeMetrics() != null) {
- NameNode.getNameNodeMetrics().numBlocksCorrupted.set(
- corruptReplicasMap.size());
- }
}
}
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=785025&r1=785024&r2=785025&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Jun 15 23:24:51 2009
@@ -2146,7 +2146,7 @@
void unprotectedRemoveDatanode(DatanodeDescriptor nodeDescr) {
nodeDescr.resetBlocks();
- blockManager.removeFromInvalidates(nodeDescr);
+ blockManager.removeFromInvalidates(nodeDescr.getStorageID());
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.unprotectedRemoveDatanode: "
+ nodeDescr.getName() + " is out of service now.");
@@ -2419,7 +2419,7 @@
return new long[] {this.capacityTotal, this.capacityUsed,
this.capacityRemaining,
getUnderReplicatedBlocks(),
- getCorruptReplicaBlocksCount(),
+ getCorruptReplicaBlocks(),
getMissingBlocksCount()};
}
}
@@ -3469,7 +3469,7 @@
}
/** Returns number of blocks with corrupt replicas */
- public long getCorruptReplicaBlocksCount() {
+ public long getCorruptReplicaBlocks() {
return blockManager.corruptReplicaBlocksCount;
}
@@ -3477,6 +3477,18 @@
return blockManager.scheduledReplicationBlocksCount;
}
+ public long getPendingDeletionBlocks() {
+ return blockManager.pendingDeletionBlocksCount;
+ }
+
+ public long getExcessBlocks() {
+ return blockManager.excessBlocksCount;
+ }
+
+ public int getBlockCapacity() {
+ return blockManager.getCapacity();
+ }
+
public String getFSState() {
return isInSafeMode() ? "safeMode" : "Operational";
}
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java?rev=785025&r1=785024&r2=785025&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java Mon Jun 15 23:24:51 2009
@@ -43,19 +43,22 @@
public class FSNamesystemMetrics implements Updater {
private static Log log = LogFactory.getLog(FSNamesystemMetrics.class);
private final MetricsRecord metricsRecord;
- private final MetricsRegistry registry = new MetricsRegistry();
-
+ final MetricsRegistry registry = new MetricsRegistry();
- private final MetricsIntValue filesTotal = new MetricsIntValue("FilesTotal", registry);
- private final MetricsLongValue blocksTotal = new MetricsLongValue("BlocksTotal", registry);
- private final MetricsIntValue capacityTotalGB = new MetricsIntValue("CapacityTotalGB", registry);
- private final MetricsIntValue capacityUsedGB = new MetricsIntValue("CapacityUsedGB", registry);
- private final MetricsIntValue capacityRemainingGB = new MetricsIntValue("CapacityRemainingGB", registry);
- private final MetricsIntValue totalLoad = new MetricsIntValue("TotalLoad", registry);
- private final MetricsIntValue pendingReplicationBlocks = new MetricsIntValue("PendingReplicationBlocks", registry);
- private final MetricsIntValue underReplicatedBlocks = new MetricsIntValue("UnderReplicatedBlocks", registry);
- private final MetricsIntValue scheduledReplicationBlocks = new MetricsIntValue("ScheduledReplicationBlocks", registry);
- private final MetricsIntValue missingBlocks = new MetricsIntValue("MissingBlocks", registry);
+ final MetricsIntValue filesTotal = new MetricsIntValue("FilesTotal", registry);
+ final MetricsLongValue blocksTotal = new MetricsLongValue("BlocksTotal", registry);
+ final MetricsIntValue capacityTotalGB = new MetricsIntValue("CapacityTotalGB", registry);
+ final MetricsIntValue capacityUsedGB = new MetricsIntValue("CapacityUsedGB", registry);
+ final MetricsIntValue capacityRemainingGB = new MetricsIntValue("CapacityRemainingGB", registry);
+ final MetricsIntValue totalLoad = new MetricsIntValue("TotalLoad", registry);
+ final MetricsIntValue pendingDeletionBlocks = new MetricsIntValue("PendingDeletionBlocks", registry);
+ final MetricsIntValue corruptBlocks = new MetricsIntValue("CorruptBlocks", registry);
+ final MetricsIntValue excessBlocks = new MetricsIntValue("ExcessBlocks", registry);
+ final MetricsIntValue pendingReplicationBlocks = new MetricsIntValue("PendingReplicationBlocks", registry);
+ final MetricsIntValue underReplicatedBlocks = new MetricsIntValue("UnderReplicatedBlocks", registry);
+ final MetricsIntValue scheduledReplicationBlocks = new MetricsIntValue("ScheduledReplicationBlocks", registry);
+ final MetricsIntValue missingBlocks = new MetricsIntValue("MissingBlocks", registry);
+ final MetricsIntValue blockCapacity = new MetricsIntValue("BlockCapacity", registry);
private final FSNamesystem fsNameSystem;
@@ -103,12 +106,16 @@
capacityRemainingGB.set(roundBytesToGBytes(fsNameSystem.
getCapacityRemaining()));
totalLoad.set(fsNameSystem.getTotalLoad());
+ corruptBlocks.set((int)fsNameSystem.getCorruptReplicaBlocks());
+ excessBlocks.set((int)fsNameSystem.getExcessBlocks());
+ pendingDeletionBlocks.set((int)fsNameSystem.getPendingDeletionBlocks());
pendingReplicationBlocks.set((int)fsNameSystem.
getPendingReplicationBlocks());
underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks());
scheduledReplicationBlocks.set((int)fsNameSystem.
getScheduledReplicationBlocks());
missingBlocks.set((int)fsNameSystem.getMissingBlocksCount());
+ blockCapacity.set(fsNameSystem.getBlockCapacity());
for (MetricsBase m : registry.getMetricsList()) {
m.pushMetric(metricsRecord);
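
doUpdates() above snapshots the FSNamesystem-derived values into int gauges and then pushes every metric registered in the registry; long counters such as getCorruptReplicaBlocks() are narrowed to int in the process. The following self-contained sketch only illustrates that snapshot-then-push pattern; it deliberately does not use the Hadoop metrics classes, and all names in it are hypothetical.

import java.util.LinkedHashMap;
import java.util.Map;

public class GaugeSnapshotSketch {
  private final Map<String, Integer> gauges = new LinkedHashMap<String, Integer>();

  void set(String name, long value) {
    gauges.put(name, (int) value);                   // long counter -> int gauge, as in the patch
  }

  void pushAll() {
    for (Map.Entry<String, Integer> g : gauges.entrySet()) {
      System.out.println(g.getKey() + "=" + g.getValue());  // stand-in for pushMetric()
    }
  }

  public static void main(String[] args) {
    GaugeSnapshotSketch m = new GaugeSnapshotSketch();
    m.set("CorruptBlocks", 1L);
    m.set("ExcessBlocks", 0L);
    m.set("BlockCapacity", 64L);
    m.pushAll();
  }
}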
Added: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=785025&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (added)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Mon Jun 15 23:24:51 2009
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.metrics;
+
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.BlockManager;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+
+/**
+ * Test for metrics published by the Namenode
+ */
+public class TestNameNodeMetrics extends TestCase {
+ private static final Configuration CONF = new Configuration();
+ static {
+ CONF.setLong("dfs.block.size", 100);
+ CONF.setInt("io.bytes.per.checksum", 1);
+ CONF.setLong("dfs.heartbeat.interval", 1L);
+ CONF.setInt("dfs.replication.interval", 1);
+ }
+
+ private MiniDFSCluster cluster;
+ private FSNamesystemMetrics metrics;
+ private DistributedFileSystem fs;
+ private Random rand = new Random();
+ private FSNamesystem namesystem;
+
+ @Override
+ protected void setUp() throws Exception {
+ cluster = new MiniDFSCluster(CONF, 3, true, null);
+ cluster.waitActive();
+ namesystem = cluster.getNameNode().getNamesystem();
+ fs = (DistributedFileSystem) cluster.getFileSystem();
+ metrics = namesystem.getFSNamesystemMetrics();
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ cluster.shutdown();
+ }
+
+ /** create a file with a length of <code>fileLen</code> */
+ private void createFile(String fileName, long fileLen, short replicas) throws IOException {
+ Path filePath = new Path(fileName);
+ DFSTestUtil.createFile(fs, filePath, fileLen, replicas, rand.nextLong());
+ }
+
+ private void updateMetrics() throws Exception {
+ // Wait for metrics update (corresponds to dfs.replication.interval
+ // for some block related metrics to get updated)
+ Thread.sleep(1000);
+ metrics.doUpdates(null);
+ }
+
+ /** Test metrics associated with addition of a file */
+ public void testFileAdd() throws Exception {
+ // Create a file with 32 blocks of 100 bytes each
+ final String file = "/tmp/t";
+ createFile(file, 3200, (short)3);
+ final int blockCount = 32;
+ int blockCapacity = namesystem.getBlockCapacity();
+ updateMetrics();
+ assertEquals(blockCapacity, metrics.blockCapacity.get());
+
+ // Blocks are stored in a hashmap. Compute its capacity, which
+ // doubles every time the number of entries reaches the threshold.
+ int threshold = (int)(blockCapacity * BlockManager.DEFAULT_MAP_LOAD_FACTOR);
+ while (threshold < blockCount) {
+ blockCapacity <<= 1;
+ }
+ updateMetrics();
+ assertEquals(3, metrics.filesTotal.get());
+ assertEquals(blockCount, metrics.blocksTotal.get());
+ assertEquals(blockCapacity, metrics.blockCapacity.get());
+ fs.delete(new Path(file), true);
+ }
+
+ /** Corrupt a block and ensure the metrics reflect it */
+ public void testCorruptBlock() throws Exception {
+ // Create a file with single block with two replicas
+ String file = "/tmp/t";
+ createFile(file, 100, (short)2);
+
+ // Corrupt first replica of the block
+ LocatedBlock block = namesystem.getBlockLocations(file, 0, 1).get(0);
+ namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
+ updateMetrics();
+ assertEquals(1, metrics.corruptBlocks.get());
+ assertEquals(1, metrics.pendingReplicationBlocks.get());
+ assertEquals(1, metrics.scheduledReplicationBlocks.get());
+ fs.delete(new Path(file), true);
+ updateMetrics();
+ assertEquals(0, metrics.corruptBlocks.get());
+ assertEquals(0, metrics.pendingReplicationBlocks.get());
+ assertEquals(0, metrics.scheduledReplicationBlocks.get());
+ }
+
+ /** Create excess blocks by reducing the replication factor
+ * of a file and ensure the metrics reflect it
+ */
+ public void testExcessBlocks() throws Exception {
+ String file = "/tmp/t";
+ createFile(file, 100, (short)2);
+ int totalBlocks = 1;
+ namesystem.setReplication(file, (short)1);
+ updateMetrics();
+ assertEquals(totalBlocks, metrics.excessBlocks.get());
+ assertEquals(totalBlocks, metrics.pendingDeletionBlocks.get());
+ fs.delete(new Path(file), true);
+ }
+
+ /** Test to ensure the metrics reflect missing blocks */
+ public void testMissingBlock() throws Exception {
+ // Create a file with single block with two replicas
+ String file = "/tmp/t";
+ createFile(file, 100, (short)1);
+
+ // Corrupt the only replica of the block to result in a missing block
+ LocatedBlock block = namesystem.getBlockLocations(file, 0, 1).get(0);
+ namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
+ updateMetrics();
+ assertEquals(1, metrics.underReplicatedBlocks.get());
+ assertEquals(1, metrics.missingBlocks.get());
+ fs.delete(new Path(file), true);
+ updateMetrics();
+ assertEquals(0, metrics.underReplicatedBlocks.get());
+ }
+}