Author: shv
Date: Fri Jun 26 22:50:08 2009
New Revision: 788899
URL: http://svn.apache.org/viewvc?rev=788899&view=rev
Log:
HADOOP-5897. Merge -r 785024:785025 from trunk to branch 0.20.
Added:
hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/
hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java   (with props)
Modified:
hadoop/common/branches/branch-0.20/ (props changed)
hadoop/common/branches/branch-0.20/CHANGES.txt
hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java
hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
Propchange: hadoop/common/branches/branch-0.20/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Fri Jun 26 22:50:08 2009
@@ -3,3 +3,5 @@
.classpath
.project
.settings
+
+.externalToolBuilders
Modified: hadoop/common/branches/branch-0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/CHANGES.txt?rev=788899&r1=788898&r2=788899&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.20/CHANGES.txt Fri Jun 26 22:50:08 2009
@@ -26,6 +26,9 @@
HADOOP-4372. Improves the way history filenames are obtained and
manipulated. (Amar Kamat via ddas)
+ HADOOP-5897. Add name-node metrics to capture java heap usage.
+ (Suresh Srinivas via shv)
+
HDFS-438. Improve help message for space quota command. (Raghu Angadi)
OPTIMIZATIONS
Modified: hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java?rev=788899&r1=788898&r2=788899&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java (original)
+++ hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java Fri Jun 26 22:50:08 2009
@@ -290,7 +290,20 @@
}
}
- private Map<Block, BlockInfo> map = new HashMap<Block, BlockInfo>();
+ // Used for tracking HashMap capacity growth
+ private int capacity;
+ private final float loadFactor;
+
+ private Map<BlockInfo, BlockInfo> map;
+
+ BlocksMap(int initialCapacity, float loadFactor) {
+ this.capacity = 1;
+ // Capacity is initialized to the next power of 2 >= initialCapacity
+ while (this.capacity < initialCapacity)
+ this.capacity <<= 1;
+ this.loadFactor = loadFactor;
+ this.map = new HashMap<BlockInfo, BlockInfo>(initialCapacity, loadFactor);
+ }
/**
* Add BlockInfo if mapping does not exist.
@@ -421,4 +434,18 @@
return true;
}
+
+ /** Get the capacity of the HashMap that stores blocks */
+ public int getCapacity() {
+ // Capacity doubles every time the map size reaches the threshold
+ while (map.size() > (int)(capacity * loadFactor)) {
+ capacity <<= 1;
+ }
+ return capacity;
+ }
+
+ /** Get the load factor of the map */
+ public float getLoadFactor() {
+ return loadFactor;
+ }
}
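The capacity tracking above works because java.util.HashMap grows by doubling once its size exceeds capacity * loadFactor, but never exposes its internal table length. A minimal standalone sketch of the same idea follows; the class and method names are illustrative only, not part of the patch:

import java.util.HashMap;
import java.util.Map;

public class CapacityTrackingMap<K, V> {
  private int capacity = 1;          // shadows HashMap's table length
  private final float loadFactor;
  private final Map<K, V> map;

  public CapacityTrackingMap(int initialCapacity, float loadFactor) {
    // Round the requested capacity up to the next power of two,
    // matching what HashMap does internally.
    while (capacity < initialCapacity) {
      capacity <<= 1;
    }
    this.loadFactor = loadFactor;
    this.map = new HashMap<K, V>(initialCapacity, loadFactor);
  }

  public void put(K key, V value) {
    map.put(key, value);
  }

  /** Estimate of HashMap's current table length. */
  public int getCapacity() {
    // HashMap doubles its table once size exceeds capacity * loadFactor.
    while (map.size() > (int) (capacity * loadFactor)) {
      capacity <<= 1;
    }
    return capacity;
  }

  public static void main(String[] args) {
    CapacityTrackingMap<Integer, Integer> m =
        new CapacityTrackingMap<Integer, Integer>(16, 0.75f);
    for (int i = 0; i < 32; i++) {
      m.put(i, i);
    }
    // 32 entries exceed 0.75*16 and 0.75*32, so the estimate is 64.
    System.out.println(m.getCapacity()); // prints 64
  }
}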
Modified: hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java?rev=788899&r1=788898&r2=788899&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java (original)
+++ hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java Fri Jun 26 22:50:08 2009
@@ -61,10 +61,6 @@
"on " + dn.getName() +
" by " + Server.getRemoteIp());
}
- if (NameNode.getNameNodeMetrics() != null) {
- NameNode.getNameNodeMetrics().numBlocksCorrupted.set(
- corruptReplicasMap.size());
- }
}
/**
@@ -75,10 +71,6 @@
void removeFromCorruptReplicasMap(Block blk) {
if (corruptReplicasMap != null) {
corruptReplicasMap.remove(blk);
- if (NameNode.getNameNodeMetrics() != null) {
- NameNode.getNameNodeMetrics().numBlocksCorrupted.set(
- corruptReplicasMap.size());
- }
}
}
Modified: hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=788899&r1=788898&r2=788899&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Jun 26 22:50:08 2009
@@ -117,6 +117,10 @@
public static final Log auditLog = LogFactory.getLog(
FSNamesystem.class.getName() + ".audit");
+ // Default initial capacity and load factor of map
+ public static final int DEFAULT_INITIAL_MAP_CAPACITY = 16;
+ public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f;
+
private boolean isPermissionEnabled;
private UserGroupInformation fsOwner;
private String supergroup;
@@ -125,9 +129,13 @@
private FSNamesystemMetrics myFSMetrics;
private long capacityTotal = 0L, capacityUsed = 0L, capacityRemaining = 0L;
private int totalLoad = 0;
- private long pendingReplicationBlocksCount = 0L, corruptReplicaBlocksCount,
- underReplicatedBlocksCount = 0L, scheduledReplicationBlocksCount = 0L;
+ volatile long pendingReplicationBlocksCount = 0L;
+ volatile long corruptReplicaBlocksCount = 0L;
+ volatile long underReplicatedBlocksCount = 0L;
+ volatile long scheduledReplicationBlocksCount = 0L;
+ volatile long excessBlocksCount = 0L;
+ volatile long pendingDeletionBlocksCount = 0L;
//
// Stores the correct file name hierarchy
//
@@ -137,7 +145,8 @@
// Mapping: Block -> { INode, datanodes, self ref }
// Updated only in response to client-sent information.
//
- BlocksMap blocksMap = new BlocksMap();
+ final BlocksMap blocksMap = new BlocksMap(DEFAULT_INITIAL_MAP_CAPACITY,
+ DEFAULT_MAP_LOAD_FACTOR);
//
// Store blocks-->datanodedescriptor(s) map of corrupt replicas
@@ -1181,7 +1190,9 @@
// This reduces the possibility of triggering HADOOP-1349.
//
for(Collection<Block> v : recentInvalidateSets.values()) {
- v.remove(last);
+ if (v.remove(last)) {
+ pendingDeletionBlocksCount--;
+ }
}
}
}
@@ -1461,8 +1472,11 @@
* Remove a datanode from the invalidatesSet
* @param n datanode
*/
- private void removeFromInvalidates(DatanodeInfo n) {
- recentInvalidateSets.remove(n.getStorageID());
+ void removeFromInvalidates(String storageID) {
+ Collection<Block> blocks = recentInvalidateSets.remove(storageID);
+ if (blocks != null) {
+ pendingDeletionBlocksCount -= blocks.size();
+ }
}
/**
@@ -1489,7 +1503,9 @@
invalidateSet = new HashSet<Block>();
recentInvalidateSets.put(n.getStorageID(), invalidateSet);
}
- invalidateSet.add(b);
+ if (invalidateSet.add(b)) {
+ pendingDeletionBlocksCount++;
+ }
}
/**
@@ -1509,7 +1525,8 @@
*/
private synchronized void dumpRecentInvalidateSets(PrintWriter out) {
int size = recentInvalidateSets.values().size();
- out.println("Metasave: Blocks waiting deletion from "+size+" datanodes.");
+ out.println("Metasave: Blocks " + pendingDeletionBlocksCount
+ + " waiting deletion from " + size + " datanodes.");
if (size == 0) {
return;
}
@@ -2658,9 +2675,13 @@
String firstNodeId = recentInvalidateSets.keySet().iterator().next();
assert firstNodeId != null;
DatanodeDescriptor dn = datanodeMap.get(firstNodeId);
- Collection<Block> invalidateSet = recentInvalidateSets.remove(firstNodeId);
-
- if(invalidateSet == null || dn == null)
+ if (dn == null) {
+ removeFromInvalidates(firstNodeId);
+ return 0;
+ }
+
+ Collection<Block> invalidateSet = recentInvalidateSets.get(firstNodeId);
+ if(invalidateSet == null)
return 0;
ArrayList<Block> blocksToInvalidate =
@@ -2674,10 +2695,10 @@
it.remove();
}
- // If we could not send everything in this message, reinsert this item
- // into the collection.
- if(it.hasNext())
- recentInvalidateSets.put(firstNodeId, invalidateSet);
+ // If we send everything in this message, remove this node entry
+ if (!it.hasNext()) {
+ removeFromInvalidates(firstNodeId);
+ }
dn.addBlocksToBeInvalidated(blocksToInvalidate);
@@ -2756,7 +2777,7 @@
void unprotectedRemoveDatanode(DatanodeDescriptor nodeDescr) {
nodeDescr.resetBlocks();
- removeFromInvalidates(nodeDescr);
+ removeFromInvalidates(nodeDescr.getStorageID());
NameNode.stateChangeLog.debug(
"BLOCK*
NameSystem.unprotectedRemoveDatanode: "
+ nodeDescr.getName() + " is out of service
now.");
@@ -3265,9 +3286,12 @@
excessBlocks = new TreeSet<Block>();
excessReplicateMap.put(cur.getStorageID(), excessBlocks);
}
- excessBlocks.add(b);
- NameNode.stateChangeLog.debug("BLOCK* NameSystem.chooseExcessReplicates: "
- +"("+cur.getName()+", "+b+") is added to excessReplicateMap");
+ if (excessBlocks.add(b)) {
+ excessBlocksCount++;
+ NameNode.stateChangeLog.debug("BLOCK* NameSystem.chooseExcessReplicates: "
+ +"("+cur.getName()+", "+b
+ +") is added to excessReplicateMap");
+ }
//
// The 'excessblocks' tracks blocks until we get confirmation
@@ -3315,11 +3339,13 @@
//
Collection<Block> excessBlocks = excessReplicateMap.get(node.getStorageID());
if (excessBlocks != null) {
- excessBlocks.remove(block);
- NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
- +block+" is removed from excessBlocks");
- if (excessBlocks.size() == 0) {
- excessReplicateMap.remove(node.getStorageID());
+ if (excessBlocks.remove(block)) {
+ excessBlocksCount--;
+ NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
+ + block + " is removed from excessBlocks");
+ if (excessBlocks.size() == 0) {
+ excessReplicateMap.remove(node.getStorageID());
+ }
}
}
@@ -4229,11 +4255,7 @@
if (blockTotal == -1 && blockSafe == -1) {
return true; // manual safe mode
}
- int activeBlocks = blocksMap.size();
- for(Iterator<Collection<Block>> it =
- recentInvalidateSets.values().iterator(); it.hasNext();) {
- activeBlocks -= it.next().size();
- }
+ int activeBlocks = blocksMap.size() - (int)pendingDeletionBlocksCount;
return (blockTotal == activeBlocks) ||
(blockSafe >= 0 && blockSafe <= blockTotal);
}
@@ -4521,7 +4543,7 @@
}
/** Returns number of blocks with corrupt replicas */
- public long getCorruptReplicaBlocksCount() {
+ public long getCorruptReplicaBlocks() {
return corruptReplicaBlocksCount;
}
@@ -4529,6 +4551,18 @@
return scheduledReplicationBlocksCount;
}
+ public long getPendingDeletionBlocks() {
+ return pendingDeletionBlocksCount;
+ }
+
+ public long getExcessBlocks() {
+ return excessBlocksCount;
+ }
+
+ public synchronized int getBlockCapacity() {
+ return blocksMap.getCapacity();
+ }
+
public String getFSState() {
return isInSafeMode() ? "safeMode" : "Operational";
}
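The FSNamesystem changes above all follow one bookkeeping pattern: a volatile counter is adjusted only when the underlying collection reports a real change (the boolean result of Set.add/Set.remove, or the size of a removed set), which keeps the counter consistent with the per-datanode sets without rescanning them. A minimal sketch of that pattern, with illustrative names rather than the real Hadoop types:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PendingDeletionTracker {
  private final Map<String, Set<String>> perNode =
      new HashMap<String, Set<String>>();
  private long pendingCount = 0;

  public synchronized void add(String nodeId, String block) {
    Set<String> set = perNode.get(nodeId);
    if (set == null) {
      set = new HashSet<String>();
      perNode.put(nodeId, set);
    }
    if (set.add(block)) {      // count only genuine insertions
      pendingCount++;
    }
  }

  public synchronized void removeNode(String nodeId) {
    Set<String> set = perNode.remove(nodeId);
    if (set != null) {         // subtract exactly what was dropped
      pendingCount -= set.size();
    }
  }

  /** Always equals the total number of blocks across all sets. */
  public synchronized long getPendingCount() {
    return pendingCount;
  }
}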
Modified: hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java?rev=788899&r1=788898&r2=788899&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java (original)
+++ hadoop/common/branches/branch-0.20/src/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java Fri Jun 26 22:50:08 2009
@@ -42,20 +42,24 @@
*/
public class FSNamesystemMetrics implements Updater {
private static Log log = LogFactory.getLog(FSNamesystemMetrics.class);
- private final MetricsRecord metricsRecord;
+ final MetricsRecord metricsRecord;
public MetricsRegistry registry = new MetricsRegistry();
+ final MetricsIntValue filesTotal = new MetricsIntValue("FilesTotal", registry);
+ final MetricsLongValue blocksTotal = new MetricsLongValue("BlocksTotal", registry);
+ final MetricsIntValue capacityTotalGB = new MetricsIntValue("CapacityTotalGB", registry);
+ final MetricsIntValue capacityUsedGB = new MetricsIntValue("CapacityUsedGB", registry);
+ final MetricsIntValue capacityRemainingGB = new MetricsIntValue("CapacityRemainingGB", registry);
+ final MetricsIntValue totalLoad = new MetricsIntValue("TotalLoad", registry);
+ final MetricsIntValue pendingDeletionBlocks = new MetricsIntValue("PendingDeletionBlocks", registry);
+ final MetricsIntValue corruptBlocks = new MetricsIntValue("CorruptBlocks", registry);
+ final MetricsIntValue excessBlocks = new MetricsIntValue("ExcessBlocks", registry);
+ final MetricsIntValue pendingReplicationBlocks = new MetricsIntValue("PendingReplicationBlocks", registry);
+ final MetricsIntValue underReplicatedBlocks = new MetricsIntValue("UnderReplicatedBlocks", registry);
+ final MetricsIntValue scheduledReplicationBlocks = new MetricsIntValue("ScheduledReplicationBlocks", registry);
+ final MetricsIntValue missingBlocks = new MetricsIntValue("MissingBlocks", registry);
+ final MetricsIntValue blockCapacity = new MetricsIntValue("BlockCapacity", registry);
- public MetricsIntValue filesTotal = new MetricsIntValue("FilesTotal", registry);
- public MetricsLongValue blocksTotal = new MetricsLongValue("BlocksTotal", registry);
- public MetricsIntValue capacityTotalGB = new MetricsIntValue("CapacityTotalGB", registry);
- public MetricsIntValue capacityUsedGB = new MetricsIntValue("CapacityUsedGB", registry);
- public MetricsIntValue capacityRemainingGB = new MetricsIntValue("CapacityRemainingGB", registry);
- public MetricsIntValue totalLoad = new MetricsIntValue("TotalLoad", registry);
- public MetricsIntValue pendingReplicationBlocks = new MetricsIntValue("PendingReplicationBlocks", registry);
- public MetricsIntValue underReplicatedBlocks = new MetricsIntValue("UnderReplicatedBlocks", registry);
- public MetricsIntValue scheduledReplicationBlocks = new MetricsIntValue("ScheduledReplicationBlocks", registry);
- public MetricsIntValue missingBlocks = new MetricsIntValue("MissingBlocks", registry);
public FSNamesystemMetrics(Configuration conf) {
String sessionId = conf.get("session.id");
@@ -100,12 +104,16 @@
capacityRemainingGB.set(roundBytesToGBytes(fsNameSystem.
getCapacityRemaining()));
totalLoad.set(fsNameSystem.getTotalLoad());
+ corruptBlocks.set((int)fsNameSystem.getCorruptReplicaBlocks());
+ excessBlocks.set((int)fsNameSystem.getExcessBlocks());
+ pendingDeletionBlocks.set((int)fsNameSystem.getPendingDeletionBlocks());
pendingReplicationBlocks.set((int)fsNameSystem.
getPendingReplicationBlocks());
underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks());
scheduledReplicationBlocks.set((int)fsNameSystem.
getScheduledReplicationBlocks());
missingBlocks.set((int)fsNameSystem.getMissingBlocksCount());
+ blockCapacity.set(fsNameSystem.getBlockCapacity());
for (MetricsBase m : registry.getMetricsList()) {
m.pushMetric(metricsRecord);
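Taken together with the CorruptReplicasMap change, this moves the new gauges from a push model (every mutation site calling metric.set) to a pull model (doUpdates samples the namesystem once per metrics period, then pushes all gauges). A simplified, self-contained sketch of that split, using plain Java and illustrative names rather than the real Hadoop metrics classes:

public class PullMetricsSketch {
  // Hot path: maintained by the namesystem under its own locking,
  // as FSNamesystem now does with its volatile counters.
  static class Counters {
    volatile long corruptReplicaBlocks;
    volatile long excessBlocks;
  }

  // Cold path: sampled once per metrics period, analogous to
  // FSNamesystemMetrics.doUpdates.
  static class Updater {
    private final Counters source;
    Updater(Counters source) { this.source = source; }

    void doUpdates() {
      // Sample and report; in Hadoop this would be metric.set(...)
      // followed by pushMetric(metricsRecord).
      report("CorruptBlocks", source.corruptReplicaBlocks);
      report("ExcessBlocks", source.excessBlocks);
    }

    private void report(String name, long value) {
      System.out.println(name + " = " + value);
    }
  }

  public static void main(String[] args) {
    Counters c = new Counters();
    c.corruptReplicaBlocks = 1;   // mutation path: a plain field write
    new Updater(c).doUpdates();   // periodic sampling path
  }
}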
Added: hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=788899&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (added)
+++ hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Fri Jun 26 22:50:08 2009
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.metrics;
+
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+
+/**
+ * Test for metrics published by the Namenode
+ */
+public class TestNameNodeMetrics extends TestCase {
+ private static final Configuration CONF = new Configuration();
+ static {
+ CONF.setLong("dfs.block.size", 100);
+ CONF.setInt("io.bytes.per.checksum", 1);
+ CONF.setLong("dfs.heartbeat.interval", 1L);
+ CONF.setInt("dfs.replication.interval", 1);
+ }
+
+ private MiniDFSCluster cluster;
+ private FSNamesystemMetrics metrics;
+ private DistributedFileSystem fs;
+ private Random rand = new Random();
+ private FSNamesystem namesystem;
+
+ @Override
+ protected void setUp() throws Exception {
+ cluster = new MiniDFSCluster(CONF, 3, true, null);
+ cluster.waitActive();
+ namesystem = cluster.getNameNode().getNamesystem();
+ fs = (DistributedFileSystem) cluster.getFileSystem();
+ metrics = namesystem.getFSNamesystemMetrics();
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ cluster.shutdown();
+ }
+
+ /** create a file with a length of <code>fileLen</code> */
+ private void createFile(String fileName, long fileLen, short replicas)
+ throws IOException {
+ Path filePath = new Path(fileName);
+ DFSTestUtil.createFile(fs, filePath, fileLen, replicas, rand.nextLong());
+ }
+
+ private void updateMetrics() throws Exception {
+ // Wait one second (dfs.replication.interval) so that the
+ // block-related metrics have been recomputed before sampling
+ Thread.sleep(1000);
+ metrics.doUpdates(null);
+ }
+
+ /** Test metrics associated with addition of a file */
+ public void testFileAdd() throws Exception {
+ // Create a 3200-byte file, i.e. 32 blocks at the 100-byte block size
+ final String file = "/tmp/t";
+ createFile(file, 3200, (short)3);
+ final int blockCount = 32;
+ int blockCapacity = namesystem.getBlockCapacity();
+ updateMetrics();
+ assertEquals(blockCapacity, metrics.blockCapacity.get());
+
+ // Blocks are stored in a hashmap. Compute its capacity, which
+ // doubles every time the number of entries reach the threshold.
+ int threshold = (int)(blockCapacity * FSNamesystem.DEFAULT_MAP_LOAD_FACTOR);
+ while (threshold < blockCount) {
+ blockCapacity <<= 1;
+ threshold = (int)(blockCapacity * FSNamesystem.DEFAULT_MAP_LOAD_FACTOR);
+ }
+ updateMetrics();
+ assertEquals(3, metrics.filesTotal.get());
+ assertEquals(blockCount, metrics.blocksTotal.get());
+ assertEquals(blockCapacity, metrics.blockCapacity.get());
+ fs.delete(new Path(file), true);
+ }
+
+ /** Corrupt a block and ensure metrics reflect it */
+ public void testCorruptBlock() throws Exception {
+ // Create a file with single block with two replicas
+ String file = "/tmp/t";
+ createFile(file, 100, (short)2);
+
+ // Corrupt first replica of the block
+ LocatedBlock block = namesystem.getBlockLocations(file, 0, 1).get(0);
+ namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
+ updateMetrics();
+ assertEquals(1, metrics.corruptBlocks.get());
+ assertEquals(1, metrics.pendingReplicationBlocks.get());
+ assertEquals(1, metrics.scheduledReplicationBlocks.get());
+ fs.delete(new Path(file), true);
+ updateMetrics();
+ assertEquals(0, metrics.corruptBlocks.get());
+ assertEquals(0, metrics.pendingReplicationBlocks.get());
+ assertEquals(0, metrics.scheduledReplicationBlocks.get());
+ }
+
+ /** Create excess blocks by reducing the replication factor of
+ * a file and ensure metrics reflect it
+ */
+ public void testExcessBlocks() throws Exception {
+ String file = "/tmp/t";
+ createFile(file, 100, (short)2);
+ int totalBlocks = 1;
+ namesystem.setReplication(file, (short)1);
+ updateMetrics();
+ assertEquals(totalBlocks, metrics.excessBlocks.get());
+ assertEquals(totalBlocks, metrics.pendingDeletionBlocks.get());
+ fs.delete(new Path(file), true);
+ }
+
+ /** Test to ensure metrics reflect missing blocks */
+ public void testMissingBlock() throws Exception {
+ // Create a file with single block with two replicas
+ String file = "/tmp/t";
+ createFile(file, 100, (short)1);
+
+ // Corrupt the only replica of the block to result in a missing block
+ LocatedBlock block = namesystem.getBlockLocations(file, 0, 1).get(0);
+ namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
+ updateMetrics();
+ assertEquals(1, metrics.underReplicatedBlocks.get());
+ assertEquals(1, metrics.missingBlocks.get());
+ fs.delete(new Path(file), true);
+ updateMetrics();
+ assertEquals(0, metrics.underReplicatedBlocks.get());
+ }
+}
Propchange: hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
------------------------------------------------------------------------------
svn:mime-type = text/plain
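For reference, on a branch-0.20 checkout a single test class like the one added above can typically be run through the Ant build; -Dtestcase is the stock Hadoop 0.20 mechanism, so adjust if the local build differs:

ant test -Dtestcase=TestNameNodeMetrics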