Author: arp
Date: Tue Nov 12 01:16:10 2013
New Revision: 1540910

URL: http://svn.apache.org/r1540910
Log:
Merging r1540548 through r1540909 from trunk to branch HDFS-2832
Removed:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpURLTimeouts.java
Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1540548-1540909

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Nov 12 01:16:10 2013
@@ -364,6 +364,8 @@ Trunk (Unreleased)
     HDFS-5482. DistributedFileSystem#listPathBasedCacheDirectives must support
     relative paths. (Colin Patrick McCabe via cnauroth)
 
+    HDFS-5320. Add datanode caching metrics. (wang)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -463,6 +465,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5371. Let client retry the same NN when
     "dfs.client.test.drop.namenode.response.number" is enabled. (jing9)
 
+    HDFS-5467. Remove tab characters in hdfs-default.xml.
+    (Shinichi Yamashita via Andrew Wang)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -519,6 +524,8 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5325. Remove WebHdfsFileSystem#ConnRunner. (Haohui Mai via jing9)
 
+    HDFS-5488. Clean up TestHftpURLTimeout. (Haohui Mai via jing9)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1540548-1540909

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java Tue Nov 12 01:16:10 2013
@@ -155,7 +155,7 @@ public class DatanodeProtocolClientSideT
 
   @Override
   public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
-      StorageReport[] reports, long dnCacheCapacity, long dnCacheUsed,
+      StorageReport[] reports, long cacheCapacity, long cacheUsed,
       int xmitsInProgress, int xceiverCount, int failedVolumes)
           throws IOException {
     HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
@@ -165,11 +165,11 @@ public class DatanodeProtocolClientSideT
     for (StorageReport r : reports) {
       builder.addReports(PBHelper.convert(r));
     }
-    if (dnCacheCapacity != 0) {
-      builder.setDnCacheCapacity(dnCacheCapacity);
+    if (cacheCapacity != 0) {
+      builder.setCacheCapacity(cacheCapacity);
     }
-    if (dnCacheUsed != 0) {
-      builder.setDnCacheUsed(dnCacheUsed);
+    if (cacheUsed != 0) {
+      builder.setCacheUsed(cacheUsed);
     }
     HeartbeatResponseProto resp;
     try {

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java Tue Nov 12 01:16:10 2013
@@ -104,7 +104,7 @@ public class DatanodeProtocolServerSideT
       final StorageReport[] report = PBHelper.convertStorageReports(
           request.getReportsList());
       response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
-          report, request.getDnCacheCapacity(), request.getDnCacheUsed(),
+          report, request.getCacheCapacity(), request.getCacheUsed(),
           request.getXmitsInProgress(), request.getXceiverCount(),
           request.getFailedVolumes());
     } catch (IOException e) {
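Both translator changes above have to agree with the renamed proto fields further down (cacheCapacity and cacheUsed, tags 6 and 7). The sketch below is illustrative only and is not part of this change set: it shows the client-side pattern of writing the optional fields only when non-zero, so a receiver that never reads them simply sees the proto defaults of 0. The helper class and method names are hypothetical; the generated HeartbeatRequestProto class is assumed to live in DatanodeProtocolProtos as in the surrounding code.

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;

    public final class CacheStatsSketch {
      /**
       * Hypothetical helper mirroring DatanodeProtocolClientSideTranslatorPB:
       * only non-zero values are written, so unset fields fall back to the
       * declared default of 0 on the receiving side.
       */
      static void addCacheStats(HeartbeatRequestProto.Builder builder,
          long cacheCapacity, long cacheUsed) {
        if (cacheCapacity != 0) {
          builder.setCacheCapacity(cacheCapacity);
        }
        if (cacheUsed != 0) {
          builder.setCacheUsed(cacheUsed);
        }
      }
    }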
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java Tue Nov 12 01:16:10 2013
@@ -476,7 +476,7 @@ class BPServiceActor implements Runnable
 
   DatanodeCommand cacheReport() throws IOException {
     // If caching is disabled, do not send a cache report
-    if (dn.getFSDataset().getDnCacheCapacity() == 0) {
+    if (dn.getFSDataset().getCacheCapacity() == 0) {
       return null;
     }
     // send cache report if timer has expired.
@@ -514,8 +514,8 @@ class BPServiceActor implements Runnable
 
     return bpNamenode.sendHeartbeat(bpRegistration,
         reports,
-        dn.getFSDataset().getDnCacheCapacity(),
-        dn.getFSDataset().getDnCacheUsed(),
+        dn.getFSDataset().getCacheCapacity(),
+        dn.getFSDataset().getCacheUsed(),
         dn.getXmitsInProgress(),
         dn.getXceiverCount(),
         dn.getFSDataset().getNumFailedVolumes());

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java Tue Nov 12 01:16:10 2013
@@ -226,6 +226,15 @@ public class FsDatasetCache {
    */
   private final long maxBytes;
 
+  /**
+   * Number of cache commands that could not be completed successfully
+   */
+  AtomicLong numBlocksFailedToCache = new AtomicLong(0);
+  /**
+   * Number of uncache commands that could not be completed successfully
+   */
+  AtomicLong numBlocksFailedToUncache = new AtomicLong(0);
+
   public FsDatasetCache(FsDatasetImpl dataset) {
     this.dataset = dataset;
     this.maxBytes = dataset.datanode.getDnConf().getMaxLockedMemory();
@@ -274,6 +283,7 @@ public class FsDatasetCache {
             " already exists in the FsDatasetCache with state " +
             prevValue.state);
       }
+      numBlocksFailedToCache.incrementAndGet();
       return;
     }
     mappableBlockMap.put(key, new Value(null, State.CACHING));
@@ -291,6 +301,7 @@ public class FsDatasetCache {
             "does not need to be uncached, because it is not currently " +
             "in the mappableBlockMap.");
       }
+      numBlocksFailedToUncache.incrementAndGet();
       return;
     }
     switch (prevValue.state) {
@@ -317,6 +328,7 @@ public class FsDatasetCache {
             "does not need to be uncached, because it is " +
             "in state " + prevValue.state + ".");
       }
+      numBlocksFailedToUncache.incrementAndGet();
       break;
     }
   }
@@ -349,7 +361,8 @@ public class FsDatasetCache {
         LOG.warn("Failed to cache block id " + key.id + ", pool " + key.bpid +
             ": could not reserve " + length + " more bytes in the " +
             "cache: " + DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY +
-            " of " + maxBytes + " exceeded.");
+            " of " + maxBytes + " exceeded.");
+        numBlocksFailedToCache.incrementAndGet();
         return;
       }
       try {
@@ -413,6 +426,7 @@ public class FsDatasetCache {
       if (mappableBlock != null) {
         mappableBlock.close();
       }
+      numBlocksFailedToCache.incrementAndGet();
     }
   }
 }
@@ -449,7 +463,7 @@ public class FsDatasetCache {
     }
   }
 
-  // Stats related methods for FsDatasetMBean
+  // Stats related methods for FSDatasetMBean
 
   /**
    * Get the approximate amount of cache space used.
@@ -464,4 +478,13 @@ public class FsDatasetCache {
   public long getDnCacheCapacity() {
     return maxBytes;
   }
+
+  public long getNumBlocksFailedToCache() {
+    return numBlocksFailedToCache.get();
+  }
+
+  public long getNumBlocksFailedToUncache() {
+    return numBlocksFailedToUncache.get();
+  }
+
 }
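The counters introduced above are plain AtomicLongs: each failed cache or uncache command bumps one of them, and the values are read back through simple getters. A minimal, self-contained sketch of the same pattern is shown below; the class and method names are illustrative stand-ins, not part of FsDatasetCache itself.

    import java.util.concurrent.atomic.AtomicLong;

    /** Illustrative stand-in for the failure counters added to FsDatasetCache. */
    class CacheFailureCounters {
      private final AtomicLong numBlocksFailedToCache = new AtomicLong(0);
      private final AtomicLong numBlocksFailedToUncache = new AtomicLong(0);

      /** Record one cache command that could not be completed. */
      void cacheFailed() {
        numBlocksFailedToCache.incrementAndGet();
      }

      /** Record one uncache command that could not be completed. */
      void uncacheFailed() {
        numBlocksFailedToUncache.incrementAndGet();
      }

      long getNumBlocksFailedToCache() {
        return numBlocksFailedToCache.get();
      }

      long getNumBlocksFailedToUncache() {
        return numBlocksFailedToUncache.get();
      }
    }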
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java Tue Nov 12 01:16:10 2013
@@ -339,22 +339,26 @@ class FsDatasetImpl implements FsDataset
     return volumes.numberOfFailedVolumes();
   }
 
-  /**
-   * Returns the total cache used by the datanode (in bytes).
-   */
   @Override // FSDatasetMBean
-  public long getDnCacheUsed() {
+  public long getCacheUsed() {
     return cacheManager.getDnCacheUsed();
   }
 
-  /**
-   * Returns the total cache capacity of the datanode (in bytes).
-   */
   @Override // FSDatasetMBean
-  public long getDnCacheCapacity() {
+  public long getCacheCapacity() {
     return cacheManager.getDnCacheCapacity();
   }
 
+  @Override // FSDatasetMBean
+  public long getNumBlocksFailedToCache() {
+    return cacheManager.getNumBlocksFailedToCache();
+  }
+
+  @Override // FSDatasetMBean
+  public long getNumBlocksFailedToUncache() {
+    return cacheManager.getNumBlocksFailedToUncache();
+  }
+
   /**
    * Find the block's on-disk length
    */
@@ -1269,28 +1273,36 @@ class FsDatasetImpl implements FsDataset
     synchronized (this) {
       ReplicaInfo info = volumeMap.get(bpid, blockId);
-      if (info == null) {
-        LOG.warn("Failed to cache block with id " + blockId + ", pool " +
-            bpid + ": ReplicaInfo not found.");
-        return;
-      }
-      if (info.getState() != ReplicaState.FINALIZED) {
-        LOG.warn("Failed to cache block with id " + blockId + ", pool " +
-            bpid + ": replica is not finalized; it is in state " +
-            info.getState());
-        return;
-      }
+      boolean success = false;
       try {
-        volume = (FsVolumeImpl)info.getVolume();
-        if (volume == null) {
+        if (info == null) {
           LOG.warn("Failed to cache block with id " + blockId + ", pool " +
-              bpid + ": volume not found.");
+              bpid + ": ReplicaInfo not found.");
           return;
         }
-      } catch (ClassCastException e) {
-        LOG.warn("Failed to cache block with id " + blockId +
-            ": volume was not an instance of FsVolumeImpl.");
-        return;
+        if (info.getState() != ReplicaState.FINALIZED) {
+          LOG.warn("Failed to cache block with id " + blockId + ", pool " +
+              bpid + ": replica is not finalized; it is in state " +
+              info.getState());
+          return;
+        }
+        try {
+          volume = (FsVolumeImpl)info.getVolume();
+          if (volume == null) {
+            LOG.warn("Failed to cache block with id " + blockId + ", pool " +
+                bpid + ": volume not found.");
+            return;
+          }
+        } catch (ClassCastException e) {
+          LOG.warn("Failed to cache block with id " + blockId +
+              ": volume was not an instance of FsVolumeImpl.");
+          return;
+        }
+        success = true;
+      } finally {
+        if (!success) {
+          cacheManager.numBlocksFailedToCache.incrementAndGet();
+        }
       }
       blockFileName = info.getBlockFile().getAbsolutePath();
       length = info.getVisibleLength();
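The cacheBlock() restructuring above funnels every early-return validation failure through a single success flag that is checked in a finally block, so each failed attempt is counted exactly once no matter which check bailed out. A stripped-down sketch of that idiom follows; the field and parameter names here are placeholders, not the real FsDatasetImpl members.

    import java.util.concurrent.atomic.AtomicLong;

    class CacheAttemptSketch {
      private final AtomicLong numBlocksFailedToCache = new AtomicLong(0);

      void tryCache(Object replicaInfo) {
        boolean success = false;
        try {
          if (replicaInfo == null) {
            // Any early return before success = true still counts as a
            // failure, because the finally block sees success == false.
            return;
          }
          // ... further validation and the actual caching work go here ...
          success = true;
        } finally {
          if (!success) {
            numBlocksFailedToCache.incrementAndGet();
          }
        }
      }
    }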
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java Tue Nov 12 01:16:10 2013
@@ -79,12 +79,22 @@ public interface FSDatasetMBean {
   public int getNumFailedVolumes();
 
   /**
-   * Returns the total cache used by the datanode (in bytes).
+   * Returns the amount of cache used by the datanode (in bytes).
    */
-  public long getDnCacheUsed();
+  public long getCacheUsed();
 
   /**
    * Returns the total cache capacity of the datanode (in bytes).
    */
-  public long getDnCacheCapacity();
+  public long getCacheCapacity();
+
+  /**
+   * Returns the number of blocks that the datanode was unable to cache
+   */
+  public long getNumBlocksFailedToCache();
+
+  /**
+   * Returns the number of blocks that the datanode was unable to uncache
+   */
+  public long getNumBlocksFailedToUncache();
 }
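With the two new accessors, FSDatasetMBean now reports all four cache figures (used, capacity, failed-to-cache, failed-to-uncache) from a single interface. The read-only helper below is hypothetical and not part of this commit; it only assumes the caller already holds an FSDatasetMBean reference, for example a dataset obtained in a test.

    import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;

    final class CacheStatusPrinter {
      /** Illustrative only: summarise the cache-related bean values. */
      static String summarize(FSDatasetMBean dataset) {
        return "cache used " + dataset.getCacheUsed() + "/"
            + dataset.getCacheCapacity() + " bytes, failed to cache "
            + dataset.getNumBlocksFailedToCache() + ", failed to uncache "
            + dataset.getNumBlocksFailedToUncache();
      }
    }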
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Tue Nov 12 01:16:10 2013
@@ -191,8 +191,8 @@ message HeartbeatRequestProto {
   optional uint32 xmitsInProgress = 3 [ default = 0 ];
   optional uint32 xceiverCount = 4 [ default = 0 ];
   optional uint32 failedVolumes = 5 [ default = 0 ];
-  optional uint64 dnCacheCapacity = 6 [ default = 0 ];
-  optional uint64 dnCacheUsed = 7 [default = 0 ];
+  optional uint64 cacheCapacity = 6 [ default = 0 ];
+  optional uint64 cacheUsed = 7 [default = 0 ];
 }
 
 message StorageReportProto {

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Tue Nov 12 01:16:10 2013
@@ -1393,43 +1393,43 @@
 </property>
 
 <property>
-  <name>dfs.namenode.enable.retrycache</name>
-  <value>true</value>
-  <description>
-    This enables the retry cache on the namenode. Namenode tracks for
-    non-idempotent requests the corresponding response. If a client retries the
-    request, the response from the retry cache is sent. Such operations
-    are tagged with annotation @AtMostOnce in namenode protocols. It is
-    recommended that this flag be set to true. Setting it to false, will result
-    in clients getting failure responses to retried request. This flag must
-    be enabled in HA setup for transparent fail-overs.
-
-    The entries in the cache have expiration time configurable
-    using dfs.namenode.retrycache.expirytime.millis.
-  </description>
+  <name>dfs.namenode.enable.retrycache</name>
+  <value>true</value>
+  <description>
+    This enables the retry cache on the namenode. Namenode tracks for
+    non-idempotent requests the corresponding response. If a client retries the
+    request, the response from the retry cache is sent. Such operations
+    are tagged with annotation @AtMostOnce in namenode protocols. It is
+    recommended that this flag be set to true. Setting it to false, will result
+    in clients getting failure responses to retried request. This flag must
+    be enabled in HA setup for transparent fail-overs.
+
+    The entries in the cache have expiration time configurable
+    using dfs.namenode.retrycache.expirytime.millis.
+  </description>
 </property>
 
 <property>
-  <name>dfs.namenode.retrycache.expirytime.millis</name>
-  <value>600000</value>
-  <description>
-    The time for which retry cache entries are retained.
-  </description>
+  <name>dfs.namenode.retrycache.expirytime.millis</name>
+  <value>600000</value>
+  <description>
+    The time for which retry cache entries are retained.
+  </description>
 </property>
 
 <property>
-  <name>dfs.namenode.retrycache.heap.percent</name>
-  <value>0.03f</value>
-  <description>
-    This parameter configures the heap size allocated for retry cache
-    (excluding the response cached). This corresponds to approximately
-    4096 entries for every 64MB of namenode process java heap size.
-    Assuming retry cache entry expiration time (configured using
-    dfs.namenode.retrycache.expirytime.millis) of 10 minutes, this
-    enables retry cache to support 7 operations per second sustained
-    for 10 minutes. As the heap size is increased, the operation rate
-    linearly increases.
-  </description>
+  <name>dfs.namenode.retrycache.heap.percent</name>
+  <value>0.03f</value>
+  <description>
+    This parameter configures the heap size allocated for retry cache
+    (excluding the response cached). This corresponds to approximately
+    4096 entries for every 64MB of namenode process java heap size.
+    Assuming retry cache entry expiration time (configured using
+    dfs.namenode.retrycache.expirytime.millis) of 10 minutes, this
+    enables retry cache to support 7 operations per second sustained
+    for 10 minutes. As the heap size is increased, the operation rate
+    linearly increases.
+  </description>
 </property>
 
 <property>
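The hdfs-default.xml hunk above is whitespace-only (HDFS-5467 replaces tab characters with spaces); the retry-cache keys and their defaults are unchanged. For reference, a small sketch of setting the same properties programmatically, using the literal key names and the shipped default values from the section above; this is illustrative, not a tuning recommendation.

    import org.apache.hadoop.conf.Configuration;

    final class RetryCacheConfSketch {
      /** Build a Configuration with the documented retry-cache defaults. */
      static Configuration withRetryCacheDefaults() {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.namenode.enable.retrycache", true);
        conf.setLong("dfs.namenode.retrycache.expirytime.millis", 600000L);
        conf.setFloat("dfs.namenode.retrycache.heap.percent", 0.03f);
        return conf;
      }
    }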
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Tue Nov 12 01:16:10 2013
@@ -524,12 +524,22 @@ public class SimulatedFSDataset implemen
   }
 
   @Override // FSDatasetMBean
-  public long getDnCacheUsed() {
+  public long getCacheUsed() {
     return 0l;
   }
 
   @Override // FSDatasetMBean
-  public long getDnCacheCapacity() {
+  public long getCacheCapacity() {
+    return 0l;
+  }
+
+  @Override
+  public long getNumBlocksFailedToCache() {
+    return 0l;
+  }
+
+  @Override
+  public long getNumBlocksFailedToUncache() {
     return 0l;
   }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Tue Nov 12 01:16:10 2013
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static junit.framework.Assert.assertTrue;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.anyInt;
+import static org.mockito.Matchers.anyLong;
 import static org.mockito.Mockito.doReturn;
 
 import java.io.FileInputStream;
@@ -57,14 +59,15 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
 
 public class TestFsDatasetCache {
@@ -94,6 +97,7 @@ public class TestFsDatasetCache {
     conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
         CACHE_CAPACITY);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY, true);
 
     cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(1).build();
@@ -187,7 +191,7 @@ public class TestFsDatasetCache {
       @Override
       public Boolean get() {
-        long curDnCacheUsed = fsd.getDnCacheUsed();
+        long curDnCacheUsed = fsd.getCacheUsed();
         if (curDnCacheUsed != expected) {
           if (tries++ > 10) {
             LOG.info("verifyExpectedCacheUsage: expected " +
@@ -222,22 +226,37 @@ public class TestFsDatasetCache {
     final long[] blockSizes = getBlockSizes(locs);
 
     // Check initial state
-    final long cacheCapacity = fsd.getDnCacheCapacity();
-    long cacheUsed = fsd.getDnCacheUsed();
+    final long cacheCapacity = fsd.getCacheCapacity();
+    long cacheUsed = fsd.getCacheUsed();
     long current = 0;
     assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
     assertEquals("Unexpected amount of cache used", current, cacheUsed);
 
+    MetricsRecordBuilder dnMetrics;
+    long numCacheCommands = 0;
+    long numUncacheCommands = 0;
+
     // Cache each block in succession, checking each time
     for (int i=0; i<NUM_BLOCKS; i++) {
       setHeartbeatResponse(cacheBlock(locs[i]));
       current = verifyExpectedCacheUsage(current + blockSizes[i]);
+      dnMetrics = getMetrics(dn.getMetrics().name());
+      long cmds = MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
+      assertTrue("Expected more cache requests from the NN (" +
+          cmds + " <= " + numCacheCommands + ")",
+          cmds > numCacheCommands);
+      numCacheCommands = cmds;
     }
 
     // Uncache each block in succession, again checking each time
     for (int i=0; i<NUM_BLOCKS; i++) {
       setHeartbeatResponse(uncacheBlock(locs[i]));
       current = verifyExpectedCacheUsage(current - blockSizes[i]);
+      dnMetrics = getMetrics(dn.getMetrics().name());
+      long cmds = MetricsAsserts.getLongCounter("BlocksUncached", dnMetrics);
+      assertTrue("Expected more uncache requests from the NN",
+          cmds > numUncacheCommands);
+      numUncacheCommands = cmds;
     }
     LOG.info("finishing testCacheAndUncacheBlock");
   }
@@ -293,6 +312,9 @@ public class TestFsDatasetCache {
         return lines > 0;
       }
     }, 500, 30000);
+    // Also check the metrics for the failure
+    assertTrue("Expected more than 0 failed cache attempts",
+        fsd.getNumBlocksFailedToCache() > 0);
 
     // Uncache the n-1 files
     for (int i=0; i<numFiles-1; i++) {
@@ -322,8 +344,8 @@ public class TestFsDatasetCache {
     final long[] blockSizes = getBlockSizes(locs);
 
     // Check initial state
-    final long cacheCapacity = fsd.getDnCacheCapacity();
-    long cacheUsed = fsd.getDnCacheUsed();
+    final long cacheCapacity = fsd.getCacheCapacity();
+    long cacheUsed = fsd.getCacheUsed();
     long current = 0;
     assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
     assertEquals("Unexpected amount of cache used", current, cacheUsed);
@@ -354,4 +376,24 @@ public class TestFsDatasetCache {
     current = verifyExpectedCacheUsage(0);
     LOG.info("finishing testUncachingBlocksBeforeCachingFinishes");
   }
+
+  @Test(timeout=60000)
+  public void testUncacheUnknownBlock() throws Exception {
+    // Create a file
+    Path fileName = new Path("/testUncacheUnknownBlock");
+    int fileLen = 4096;
+    DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 0xFDFD);
+    HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
+        fileName, 0, fileLen);
+
+    // Try to uncache it without caching it first
+    setHeartbeatResponse(uncacheBlocks(locs));
+
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return fsd.getNumBlocksFailedToUncache() > 0;
+      }
+    }, 100, 10000);
+  }
 }
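The new assertions above combine two existing test utilities: MetricsAsserts, to snapshot the datanode's metrics record and read a named counter such as "BlocksCached", and GenericTestUtils.waitFor, to poll until a condition holds. The small helper below captures the counter-reading step as a reusable method; the class and method names are hypothetical, but the utility calls are exactly the ones used in the test above.

    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.metrics2.MetricsRecordBuilder;
    import org.apache.hadoop.test.MetricsAsserts;

    final class DnCounterReader {
      /** Read one long counter, e.g. "BlocksCached", from a datanode's metrics. */
      static long readCounter(DataNode dn, String counterName) {
        MetricsRecordBuilder rb = MetricsAsserts.getMetrics(dn.getMetrics().name());
        return MetricsAsserts.getLongCounter(counterName, rb);
      }
    }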
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java?rev=1540910&r1=1540909&r2=1540910&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java Tue Nov 12 01:16:10 2013
@@ -28,6 +28,7 @@ import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
+import java.net.URLConnection;
 import java.util.Random;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -40,15 +41,16 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.Builder;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
-import org.apache.hadoop.hdfs.web.HsftpFileSystem;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.log4j.Level;
-import org.junit.*;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
 
 public class TestHftpFileSystem {
   private static final Random RAN = new Random();
@@ -65,32 +67,24 @@ public class TestHftpFileSystem {
       new Path("/foo;bar"),
 
       // URI does not encode, Request#getPathInfo returns verbatim
-      new Path("/foo+"),
-      new Path("/foo+bar/foo+bar"),
-      new Path("/foo=bar/foo=bar"),
-      new Path("/foo,bar/foo,bar"),
-      new Path("/foo@bar/foo@bar"),
-      new Path("/foo&bar/foo&bar"),
-      new Path("/foo$bar/foo$bar"),
-      new Path("/foo_bar/foo_bar"),
-      new Path("/foo~bar/foo~bar"),
-      new Path("/foo.bar/foo.bar"),
-      new Path("/foo../bar/foo../bar"),
-      new Path("/foo.../bar/foo.../bar"),
+      new Path("/foo+"), new Path("/foo+bar/foo+bar"),
+      new Path("/foo=bar/foo=bar"), new Path("/foo,bar/foo,bar"),
+      new Path("/foo@bar/foo@bar"), new Path("/foo&bar/foo&bar"),
+      new Path("/foo$bar/foo$bar"), new Path("/foo_bar/foo_bar"),
+      new Path("/foo~bar/foo~bar"), new Path("/foo.bar/foo.bar"),
+      new Path("/foo../bar/foo../bar"), new Path("/foo.../bar/foo.../bar"),
       new Path("/foo'bar/foo'bar"),
       new Path("/foo#bar/foo#bar"),
       new Path("/foo!bar/foo!bar"),
       // HDFS file names may not contain ":"
 
       // URI percent encodes, Request#getPathInfo decodes
-      new Path("/foo bar/foo bar"),
-      new Path("/foo?bar/foo?bar"),
-      new Path("/foo\">bar/foo\">bar"),
-    };
+      new Path("/foo bar/foo bar"), new Path("/foo?bar/foo?bar"),
+      new Path("/foo\">bar/foo\">bar"), };
 
   @BeforeClass
   public static void setUp() throws IOException {
-    ((Log4JLogger)HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);
 
     final long seed = RAN.nextLong();
     System.out.println("seed=" + seed);
@@ -99,8 +93,8 @@ public class TestHftpFileSystem {
     config = new Configuration();
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
     blockPoolId = cluster.getNamesystem().getBlockPoolId();
-    hftpUri =
-      "hftp://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    hftpUri = "hftp://"
+        + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
   }
 
   @AfterClass
@@ -140,7 +134,8 @@ public class TestHftpFileSystem {
 
     // Check the file status matches the path. Hftp returns a FileStatus
     // with the entire URI, extract the path part.
-    assertEquals(p, new Path(hftpFs.getFileStatus(p).getPath().toUri().getPath()));
+    assertEquals(p, new Path(hftpFs.getFileStatus(p).getPath().toUri()
+        .getPath()));
 
     // Test list status (listPath servlet)
     assertEquals(1, hftpFs.listStatus(p).length);
@@ -158,21 +153,20 @@ public class TestHftpFileSystem {
     if (hdfs.exists(path)) {
       hdfs.delete(path, true);
     }
-    FSDataOutputStream out = hdfs.create(path, (short)1);
+    FSDataOutputStream out = hdfs.create(path, (short) 1);
     out.writeBytes("0123456789");
     out.close();
 
     // Get the path's block location so we can determine
     // if we were redirected to the right DN.
-    BlockLocation[] locations =
-        hdfs.getFileBlockLocations(path, 0, 10);
+    BlockLocation[] locations = hdfs.getFileBlockLocations(path, 0, 10);
     String xferAddr = locations[0].getNames()[0];
 
     // Connect to the NN to get redirected URL
     u = hftpFs.getNamenodeURL(
         "/data" + ServletUtil.encodePath(path.toUri().getPath()),
         "ugi=userx,groupy");
-    HttpURLConnection conn = (HttpURLConnection)u.openConnection();
+    HttpURLConnection conn = (HttpURLConnection) u.openConnection();
     HttpURLConnection.setFollowRedirects(true);
     conn.connect();
     conn.getInputStream();
@@ -181,15 +175,15 @@ public class TestHftpFileSystem {
     // Find the datanode that has the block according to locations
     // and check that the URL was redirected to this DN's info port
     for (DataNode node : cluster.getDataNodes()) {
-      DatanodeRegistration dnR =
-        DataNodeTestUtils.getDNRegistrationForBP(node, blockPoolId);
+      DatanodeRegistration dnR = DataNodeTestUtils.getDNRegistrationForBP(node,
+          blockPoolId);
       if (dnR.getXferAddr().equals(xferAddr)) {
         checked = true;
         assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
       }
     }
-    assertTrue("The test never checked that location of " +
-        "the block and hftp desitnation are the same", checked);
+    assertTrue("The test never checked that location of "
+        + "the block and hftp desitnation are the same", checked);
   }
 
   /**
@@ -260,7 +254,7 @@ public class TestHftpFileSystem {
     os.writeBytes("0123456789");
     os.close();
 
-    // ByteRangeInputStream delays opens until reads.  Make sure it doesn't
+    // ByteRangeInputStream delays opens until reads. Make sure it doesn't
     // open a closed stream that has never been opened
     FSDataInputStream in = hftpFs.open(testFile);
     in.close();
@@ -298,16 +292,15 @@ public class TestHftpFileSystem {
     URI uri = URI.create("hftp://localhost");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
+        fs.getDefaultPort());
     assertEquals(uri, fs.getUri());
 
     // HFTP uses http to get the token so canonical service name should
    // return the http port.
-    assertEquals(
-        "127.0.0.1:" + DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
-        fs.getCanonicalServiceName()
-    );
+    assertEquals("127.0.0.1:" + DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
+        fs.getCanonicalServiceName());
   }
 
   @Test
@@ -324,10 +317,7 @@ public class TestHftpFileSystem {
 
     // HFTP uses http to get the token so canonical service name should
     // return the http port.
-    assertEquals(
-        "127.0.0.1:123",
-        fs.getCanonicalServiceName()
-    );
+    assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
   }
 
   @Test
@@ -336,13 +326,11 @@ public class TestHftpFileSystem {
     URI uri = URI.create("hftp://localhost:123");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
 
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
+        fs.getDefaultPort());
     assertEquals(uri, fs.getUri());
 
-    assertEquals(
-        "127.0.0.1:123",
-        fs.getCanonicalServiceName()
-    );
+    assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
   }
 
   @Test
@@ -356,13 +344,20 @@ public class TestHftpFileSystem {
     assertEquals(123, fs.getDefaultPort());
     assertEquals(uri, fs.getUri());
 
-    assertEquals(
-        "127.0.0.1:789",
-        fs.getCanonicalServiceName()
-    );
+    assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
   }
 
-  ///
+  @Test
+  public void testTimeout() throws IOException {
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hftp://localhost");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+    URLConnection conn = fs.connectionFactory.openConnection(new URL("http://localhost"));
+    assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT, conn.getConnectTimeout());
+    assertEquals(URLConnectionFactory.DEFAULT_SOCKET_TIMEOUT, conn.getReadTimeout());
+  }
+
+  // /
 
   @Test
   public void testHsftpDefaultPorts() throws IOException {
@@ -370,13 +365,12 @@ public class TestHftpFileSystem {
     URI uri = URI.create("hsftp://localhost");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getDefaultPort());
    assertEquals(uri, fs.getUri());
 
-    assertEquals(
-        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
-        fs.getCanonicalServiceName()
-    );
+    assertEquals("127.0.0.1:" + DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getCanonicalServiceName());
   }
 
   @Test
@@ -391,10 +385,7 @@ public class TestHftpFileSystem {
     assertEquals(456, fs.getDefaultPort());
     assertEquals(uri, fs.getUri());
 
-    assertEquals(
-        "127.0.0.1:456",
-        fs.getCanonicalServiceName()
-    );
+    assertEquals("127.0.0.1:456", fs.getCanonicalServiceName());
   }
 
   @Test
@@ -403,13 +394,11 @@ public class TestHftpFileSystem {
     URI uri = URI.create("hsftp://localhost:123");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
 
-    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getDefaultPort());
     assertEquals(uri, fs.getUri());
 
-    assertEquals(
-        "127.0.0.1:123",
-        fs.getCanonicalServiceName()
-    );
+    assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
   }
 
   @Test
@@ -424,9 +413,6 @@ public class TestHftpFileSystem {
     assertEquals(456, fs.getDefaultPort());
     assertEquals(uri, fs.getUri());
 
-    assertEquals(
-        "127.0.0.1:789",
-        fs.getCanonicalServiceName()
-    );
+    assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
   }
 }