Author: szetszwo
Date: Mon Mar 26 21:09:14 2012
New Revision: 1305598
URL: http://svn.apache.org/viewvc?rev=1305598&view=rev
Log:
Revert 1305590 for HDFS-3089.
Added:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
- copied unchanged from r1305589, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockVolumeChoosingPolicy.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
- copied unchanged from r1305589, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RollingLogs.java
- copied unchanged from r1305589, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RollingLogs.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
- copied unchanged from r1305589, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/RoundRobinVolumesPolicy.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
- copied unchanged from r1305589, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
Removed:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RollingLogs.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/VolumeChoosingPolicy.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Mar 26 21:09:14 2012
@@ -263,9 +263,6 @@ Release 0.23.3 - UNRELEASED
HDFS-3071. haadmin failover command does not provide enough detail when
target NN is not ready to be active. (todd)
- HDFS-3089. Move FSDatasetInterface and the related classes to a package.
- (szetszwo)
-
OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Mon Mar 26 21:09:14 2012
@@ -237,6 +237,9 @@ public class DFSConfigKeys extends Commo
public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY =
"dfs.datanode.transferTo.allowed";
public static final boolean DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT = true;
+ public static final String DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY = "dfs.datanode.block.volume.choice.policy";
+ public static final String DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY_DEFAULT =
+ "org.apache.hadoop.hdfs.server.datanode.RoundRobinVolumesPolicy";
public static final String DFS_HEARTBEAT_INTERVAL_KEY =
"dfs.heartbeat.interval";
public static final long DFS_HEARTBEAT_INTERVAL_DEFAULT = 3;
public static final String DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY =
"dfs.namenode.decommission.interval";
@@ -302,7 +305,6 @@ public class DFSConfigKeys extends Commo
//Keys with no defaults
public static final String DFS_DATANODE_PLUGINS_KEY =
"dfs.datanode.plugins";
public static final String DFS_DATANODE_FSDATASET_FACTORY_KEY =
"dfs.datanode.fsdataset.factory";
- public static final String DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY = "dfs.datanode.fsdataset.volume.choosing.policy";
public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY =
"dfs.datanode.socket.write.timeout";
public static final String DFS_DATANODE_STARTUP_KEY =
"dfs.datanode.startup";
public static final String DFS_NAMENODE_PLUGINS_KEY =
"dfs.namenode.plugins";
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Mon Mar 26 21:09:14 2012
@@ -44,9 +44,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
@@ -74,7 +72,7 @@ class BlockPoolSliceScanner {
private final AtomicLong lastScanTime = new AtomicLong();
private final DataNode datanode;
- private final FsDatasetSpi<? extends FsVolumeSpi> dataset;
+ private final FSDatasetInterface<? extends FsVolumeSpi> dataset;
private final SortedSet<BlockScanInfo> blockInfoSet
= new TreeSet<BlockScanInfo>();
@@ -136,7 +134,8 @@ class BlockPoolSliceScanner {
}
BlockPoolSliceScanner(String bpid, DataNode datanode,
- FsDatasetSpi<? extends FsVolumeSpi> dataset, Configuration conf) {
+ FSDatasetInterface<? extends FsVolumeSpi> dataset,
+ Configuration conf) {
this.datanode = datanode;
this.dataset = dataset;
this.blockPoolId = bpid;
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Mon Mar 26 21:09:14 2012
@@ -31,7 +31,6 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
/**
@@ -44,7 +43,7 @@ import org.apache.hadoop.hdfs.server.dat
public class DataBlockScanner implements Runnable {
public static final Log LOG = LogFactory.getLog(DataBlockScanner.class);
private final DataNode datanode;
- private final FsDatasetSpi<? extends FsVolumeSpi> dataset;
+ private final FSDatasetInterface<? extends FsVolumeSpi> dataset;
private final Configuration conf;
/**
@@ -56,7 +55,7 @@ public class DataBlockScanner implements
Thread blockScannerThread = null;
DataBlockScanner(DataNode datanode,
- FsDatasetSpi<? extends FsVolumeSpi> dataset,
+ FSDatasetInterface<? extends FsVolumeSpi> dataset,
Configuration conf) {
this.datanode = datanode;
this.dataset = dataset;
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Mar 26 21:09:14 2012
@@ -122,7 +122,6 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
@@ -232,7 +231,7 @@ public class DataNode extends Configured
volatile boolean shouldRun = true;
private BlockPoolManager blockPoolManager;
- volatile FsDatasetSpi<? extends FsVolumeSpi> data = null;
+ volatile FSDatasetInterface<? extends FsVolumeSpi> data = null;
private String clusterId = null;
public final static String EMPTY_DEL_HINT = "";
@@ -810,8 +809,8 @@ public class DataNode extends Configured
* handshake with the the first namenode is completed.
*/
private void initStorage(final NamespaceInfo nsInfo) throws IOException {
- final FsDatasetSpi.Factory<? extends FsDatasetSpi<?>> factory
- = FsDatasetSpi.Factory.getFactory(conf);
+ final FSDatasetInterface.Factory<? extends FSDatasetInterface<?>> factory
+ = FSDatasetInterface.Factory.getFactory(conf);
if (!factory.isSimulated()) {
final StartupOption startOpt = getStartupOption(conf);
@@ -829,7 +828,7 @@ public class DataNode extends Configured
synchronized(this) {
if (data == null) {
- data = factory.newInstance(this, storage, conf);
+ data = factory.createFSDatasetInterface(this, storage, conf);
}
}
}
@@ -1696,7 +1695,7 @@ public class DataNode extends Configured
*
* @return the fsdataset that stores the blocks
*/
- FsDatasetSpi<?> getFSDataset() {
+ FSDatasetInterface<?> getFSDataset() {
return data;
}
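
The DataNode hunk above also restores the factory contract: FSDatasetInterface.Factory.getFactory(conf) selects the implementation and createFSDatasetInterface(...) (rather than newInstance) builds the dataset. A small sketch of that selection, using only names visible in this diff and assuming it runs from the org.apache.hadoop.hdfs.server.datanode package; the class below is hypothetical.

    package org.apache.hadoop.hdfs.server.datanode;

    import org.apache.hadoop.conf.Configuration;

    // Sketch only: shows how the default and simulated dataset factories are
    // selected, following the pattern used by DataNodeCluster and
    // TestSimulatedFSDataset later in this diff.
    class FactorySelectionSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The default factory builds the on-disk FSDataset and is not simulated.
        FSDatasetInterface.Factory<?> f = FSDatasetInterface.Factory.getFactory(conf);
        System.out.println(f.getClass() + " simulated=" + f.isSimulated());

        // Tests switch to the in-memory dataset like this.
        SimulatedFSDataset.setFactory(conf);
        FSDatasetInterface.Factory<?> s = FSDatasetInterface.Factory.getFactory(conf);
        System.out.println(s.getClass() + " simulated=" + s.isSimulated());
      }
    }
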
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java Mon Mar 26 21:09:14 2012
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.util.Daemon;
@@ -56,7 +55,7 @@ public class DirectoryScanner implements
private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
private final DataNode datanode;
- private final FsDatasetSpi<?> dataset;
+ private final FSDatasetInterface<?> dataset;
private final ExecutorService reportCompileThreadPool;
private final ScheduledExecutorService masterThread;
private final long scanPeriodMsecs;
@@ -220,7 +219,7 @@ public class DirectoryScanner implements
}
}
- DirectoryScanner(DataNode dn, FsDatasetSpi<?> dataset, Configuration conf) {
+ DirectoryScanner(DataNode dn, FSDatasetInterface<?> dataset, Configuration conf) {
this.datanode = dn;
this.dataset = dataset;
int interval =
conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
@@ -412,7 +411,7 @@ public class DirectoryScanner implements
}
/** Is the given volume still valid in the dataset? */
- private static boolean isValid(final FsDatasetSpi<?> dataset,
+ private static boolean isValid(final FSDatasetInterface<?> dataset,
final FsVolumeSpi volume) {
for (FsVolumeSpi vol : dataset.getVolumes()) {
if (vol == volume) {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Mon Mar 26 21:09:14 2012
@@ -61,14 +61,10 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
@@ -86,13 +82,13 @@ import org.apache.hadoop.util.Reflection
*
***************************************************/
@InterfaceAudience.Private
-public class FSDataset implements FsDatasetSpi<FSDataset.FSVolume> {
+class FSDataset implements FSDatasetInterface<FSDataset.FSVolume> {
/**
* A factory for creating FSDataset objects.
*/
- public static class Factory extends FsDatasetSpi.Factory<FSDataset> {
+ static class Factory extends FSDatasetInterface.Factory<FSDataset> {
@Override
- public FSDataset newInstance(DataNode datanode,
+ public FSDataset createFSDatasetInterface(DataNode datanode,
DataStorage storage, Configuration conf) throws IOException {
return new FSDataset(datanode, storage, conf);
}
@@ -827,11 +823,11 @@ public class FSDataset implements FsData
*/
private volatile List<FSVolume> volumes = null;
- final VolumeChoosingPolicy<FSVolume> blockChooser;
+ BlockVolumeChoosingPolicy<FSVolume> blockChooser;
int numFailedVolumes;
FSVolumeSet(List<FSVolume> volumes, int failedVols,
- VolumeChoosingPolicy<FSVolume> blockChooser) {
+ BlockVolumeChoosingPolicy<FSVolume> blockChooser) {
this.volumes = Collections.unmodifiableList(volumes);
this.blockChooser = blockChooser;
this.numFailedVolumes = failedVols;
@@ -1022,7 +1018,7 @@ public class FSDataset implements FsData
}
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public List<FSVolume> getVolumes() {
return volumes.volumes;
}
@@ -1033,7 +1029,7 @@ public class FSDataset implements FsData
return r != null? (FSVolume)r.getVolume(): null;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized Block getStoredBlock(String bpid, long blkid)
throws IOException {
File blockfile = getFile(bpid, blkid);
@@ -1070,7 +1066,7 @@ public class FSDataset implements FsData
return null;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
throws IOException {
final File meta = getMetaFile(b);
@@ -1129,11 +1125,11 @@ public class FSDataset implements FsData
volumeMap = new ReplicasMap(this);
@SuppressWarnings("unchecked")
- final VolumeChoosingPolicy<FSVolume> blockChooserImpl =
+ final BlockVolumeChoosingPolicy<FSVolume> blockChooserImpl =
ReflectionUtils.newInstance(conf.getClass(
- DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
- RoundRobinVolumeChoosingPolicy.class,
- VolumeChoosingPolicy.class), conf);
+ DFSConfigKeys.DFS_DATANODE_BLOCKVOLUMECHOICEPOLICY,
+ RoundRobinVolumesPolicy.class,
+ BlockVolumeChoosingPolicy.class), conf);
volumes = new FSVolumeSet(volArray, volsFailed, blockChooserImpl);
volumes.getVolumeMap(volumeMap);
@@ -1168,7 +1164,7 @@ public class FSDataset implements FsData
/**
* Return true - if there are still valid volumes on the DataNode.
*/
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public boolean hasEnoughResource() {
return getVolumes().size() >= validVolsRequired;
}
@@ -1203,7 +1199,7 @@ public class FSDataset implements FsData
/**
* Find the block's on-disk length
*/
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public long getLength(ExtendedBlock b) throws IOException {
return getBlockFile(b).length();
}
@@ -1247,7 +1243,7 @@ public class FSDataset implements FsData
return f;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public InputStream getBlockInputStream(ExtendedBlock b,
long seekOffset) throws IOException {
File blockFile = getBlockFileNoExistsCheck(b);
@@ -1305,7 +1301,7 @@ public class FSDataset implements FsData
/**
* Returns handles to the block file and its metadata file
*/
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
long blkOffset, long ckoff) throws IOException {
ReplicaInfo info = getReplicaInfo(b);
@@ -1410,7 +1406,7 @@ public class FSDataset implements FsData
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface append(ExtendedBlock b,
long newGS, long expectedBlockLen) throws IOException {
// If the block was successfully finalized because all packets
@@ -1551,7 +1547,7 @@ public class FSDataset implements FsData
return replicaInfo;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface recoverAppend(ExtendedBlock b,
long newGS, long expectedBlockLen) throws IOException {
DataNode.LOG.info("Recover failed append to " + b);
@@ -1568,7 +1564,7 @@ public class FSDataset implements FsData
}
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public void recoverClose(ExtendedBlock b, long newGS,
long expectedBlockLen) throws IOException {
DataNode.LOG.info("Recover failed close " + b);
@@ -1610,7 +1606,7 @@ public class FSDataset implements FsData
}
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface createRbw(ExtendedBlock b)
throws IOException {
ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
@@ -1630,7 +1626,7 @@ public class FSDataset implements FsData
return newReplicaInfo;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface recoverRbw(ExtendedBlock b,
long newGS, long minBytesRcvd, long maxBytesRcvd)
throws IOException {
@@ -1675,7 +1671,7 @@ public class FSDataset implements FsData
return rbw;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface convertTemporaryToRbw(
final ExtendedBlock b) throws IOException {
final long blockId = b.getBlockId();
@@ -1736,7 +1732,7 @@ public class FSDataset implements FsData
return rbw;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b)
throws IOException {
ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
b.getBlockId());
@@ -1760,7 +1756,7 @@ public class FSDataset implements FsData
* Sets the offset in the meta file so that the
* last checksum will be overwritten.
*/
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams
streams,
int checksumSize) throws IOException {
FileOutputStream file = (FileOutputStream) streams.getChecksumOut();
@@ -1785,7 +1781,7 @@ public class FSDataset implements FsData
/**
* Complete the block write!
*/
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
ReplicaInfo replicaInfo = getReplicaInfo(b);
if (replicaInfo.getState() == ReplicaState.FINALIZED) {
@@ -1822,7 +1818,7 @@ public class FSDataset implements FsData
/**
* Remove the temporary block file (if any)
*/
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException
{
ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
b.getLocalBlock());
@@ -1867,7 +1863,7 @@ public class FSDataset implements FsData
/**
* Generates a block report from the in-memory block map.
*/
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public BlockListAsLongs getBlockReport(String bpid) {
int size = volumeMap.size(bpid);
ArrayList<ReplicaInfo> finalized = new ArrayList<ReplicaInfo>(size);
@@ -1918,7 +1914,7 @@ public class FSDataset implements FsData
* Check whether the given block is a valid one.
* valid means finalized
*/
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public boolean isValidBlock(ExtendedBlock b) {
return isValid(b, ReplicaState.FINALIZED);
}
@@ -1926,7 +1922,7 @@ public class FSDataset implements FsData
/**
* Check whether the given block is a valid RBW.
*/
- @Override // {@link FsDatasetSpi}
+ @Override // {@link FSDatasetInterface}
public boolean isValidRbw(final ExtendedBlock b) {
return isValid(b, ReplicaState.RBW);
}
@@ -1991,7 +1987,7 @@ public class FSDataset implements FsData
* could lazily garbage-collect the block, but why bother?
* just get rid of it.
*/
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
boolean error = false;
for (int i = 0; i < invalidBlks.length; i++) {
@@ -2057,7 +2053,7 @@ public class FSDataset implements FsData
datanode.notifyNamenodeDeletedBlock(block);
}
- @Override // {@link FsDatasetSpi}
+ @Override // {@link FSDatasetInterface}
public synchronized boolean contains(final ExtendedBlock block) {
final long blockId = block.getLocalBlock().getBlockId();
return getFile(block.getBlockPoolId(), blockId) != null;
@@ -2082,7 +2078,7 @@ public class FSDataset implements FsData
* to these volumes
* @throws DiskErrorException
*/
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public void checkDataDir() throws DiskErrorException {
long totalBlocks=0, removedBlocks=0;
List<FSVolume> failedVols = volumes.checkDirs();
@@ -2126,7 +2122,7 @@ public class FSDataset implements FsData
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public String toString() {
return "FSDataset{dirpath='"+volumes+"'}";
}
@@ -2157,7 +2153,7 @@ public class FSDataset implements FsData
DataNode.LOG.info("Registered FSDatasetState MBean");
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public void shutdown() {
if (mbeanName != null)
MBeans.unregister(mbeanName);
@@ -2338,7 +2334,7 @@ public class FSDataset implements FsData
/**
* @deprecated use {@link #fetchReplicaInfo(String, long)} instead.
*/
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
@Deprecated
public ReplicaInfo getReplica(String bpid, long blockId) {
return volumeMap.get(bpid, blockId);
@@ -2350,7 +2346,7 @@ public class FSDataset implements FsData
return r == null? "null": r.toString();
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaRecoveryInfo initReplicaRecovery(
RecoveringBlock rBlock) throws IOException {
return initReplicaRecovery(rBlock.getBlock().getBlockPoolId(),
@@ -2423,7 +2419,7 @@ public class FSDataset implements FsData
return rur.createInfo();
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized String updateReplicaUnderRecovery(
final ExtendedBlock oldBlock,
final long recoveryId,
@@ -2505,7 +2501,7 @@ public class FSDataset implements FsData
return finalizeReplica(bpid, rur);
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized long getReplicaVisibleLength(final ExtendedBlock block)
throws IOException {
final Replica replica = getReplicaInfo(block.getBlockPoolId(),
@@ -2588,7 +2584,7 @@ public class FSDataset implements FsData
return info;
}
- @Override //FsDatasetSpi
+ @Override //FSDatasetInterface
public synchronized void deleteBlockPool(String bpid, boolean force)
throws IOException {
if (!force) {
@@ -2606,7 +2602,7 @@ public class FSDataset implements FsData
}
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
throws IOException {
File datafile = getBlockFile(block);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java Mon Mar 26 21:09:14 2012
@@ -25,7 +25,7 @@ import org.apache.hadoop.util.DataChecks
/**
* This defines the interface of a replica in Pipeline that's being written to
*/
-public interface ReplicaInPipelineInterface extends Replica {
+interface ReplicaInPipelineInterface extends Replica {
/**
* Set the number of bytes received
* @param bytesReceived number of bytes received
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Mon Mar 26 21:09:14 2012
@@ -429,6 +429,15 @@
</property>
<property>
+ <name>dfs.datanode.block.volume.choice.policy</name>
+ <value>org.apache.hadoop.hdfs.server.datanode.RoundRobinVolumesPolicy</value>
+ <description>The policy class to use to determine into which of the
+ datanode's available volumes a block must be written to. Default is a simple
+ round-robin policy that chooses volumes in a cyclic order.
+ </description>
+</property>
+
+<property>
<name>dfs.heartbeat.interval</name>
<value>3</value>
<description>Determines datanode heartbeat interval in seconds.</description>
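
As a deployment note (not part of this change), the property restored above can be overridden in hdfs-site.xml or on a Configuration object before the datanode's dataset is created; the policy class name below is a placeholder, not a class that exists in this tree.

    import org.apache.hadoop.conf.Configuration;

    public class PolicyOverrideSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Placeholder class name; any implementation of the restored
        // BlockVolumeChoosingPolicy interface could be named here.
        conf.set("dfs.datanode.block.volume.choice.policy",
            "com.example.MyVolumesPolicy");
        System.out.println(conf.get("dfs.datanode.block.volume.choice.policy"));
      }
    }
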
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java Mon Mar 26 21:09:14 2012
@@ -25,8 +25,8 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog;
import org.apache.hadoop.net.DNS;
@@ -125,7 +125,7 @@ public class DataNodeCluster {
} else if (args[i].equals("-simulated")) {
SimulatedFSDataset.setFactory(conf);
} else if (args[i].equals("-inject")) {
- if (!FsDatasetSpi.Factory.getFactory(conf).isSimulated()) {
+ if (!FSDatasetInterface.Factory.getFactory(conf).isSimulated()) {
System.out.print("-inject is valid only for simulated");
printUsageExit();
}
@@ -157,7 +157,8 @@ public class DataNodeCluster {
System.out.println("No name node address and port in config");
System.exit(-1);
}
- boolean simulated = FsDatasetSpi.Factory.getFactory(conf).isSimulated();
+ boolean simulated =
+ FSDatasetInterface.Factory.getFactory(conf).isSimulated();
System.out.println("Starting " + numDataNodes +
(simulated ? " Simulated " : " ") +
" Data Nodes that will connect to Name Node at " + nameNodeAdr);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Mon Mar 26 21:09:14 2012
@@ -17,29 +17,6 @@
*/
package org.apache.hadoop.hdfs;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HOSTS;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import java.io.File;
@@ -66,6 +43,9 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocolHelper;
import org.apache.hadoop.ha.ServiceFailedException;
@@ -77,20 +57,21 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetUtils;
@@ -1821,7 +1802,7 @@ public class MiniDFSCluster {
throw new IndexOutOfBoundsException();
}
final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
- final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
+ final FSDatasetInterface<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
if (!(dataSet instanceof SimulatedFSDataset)) {
throw new IOException("injectBlocks is valid only for SimilatedFSDataset");
}
@@ -1840,7 +1821,7 @@ public class MiniDFSCluster {
throw new IndexOutOfBoundsException();
}
final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
- final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
+ final FSDatasetInterface<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
if (!(dataSet instanceof SimulatedFSDataset)) {
throw new IOException("injectBlocks is valid only for SimilatedFSDataset");
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Mon Mar 26 21:09:14 2012
@@ -60,8 +60,8 @@ import org.apache.hadoop.hdfs.protocol.L
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.io.IOUtils;
@@ -211,7 +211,7 @@ public class TestFileCreation extends ju
// can't check capacities for real storage since the OS file system may
be changing under us.
if (simulatedStorage) {
DataNode dn = cluster.getDataNodes().get(0);
- FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
+ FSDatasetInterface<?> dataset = DataNodeTestUtils.getFSDataset(dn);
assertEquals(fileSize, dataset.getDfsUsed());
assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize,
dataset.getRemaining());
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Mon Mar 26 21:09:14 2012
@@ -24,7 +24,6 @@ import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
/**
@@ -49,7 +48,7 @@ public class DataNodeTestUtils {
*
* @return the fsdataset that stores the blocks
*/
- public static FsDatasetSpi<?> getFSDataset(DataNode dn) {
+ public static FSDatasetInterface<?> getFSDataset(DataNode dn) {
return dn.getFSDataset();
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Mon Mar 26 21:09:14 2012
@@ -39,12 +39,10 @@ import org.apache.hadoop.hdfs.protocol.B
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
@@ -67,10 +65,10 @@ import org.apache.hadoop.util.DiskChecke
*
* Note the synchronization is coarse grained - it is at each method.
*/
-public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
- static class Factory extends FsDatasetSpi.Factory<SimulatedFSDataset> {
+public class SimulatedFSDataset implements FSDatasetInterface<FsVolumeSpi> {
+ static class Factory extends FSDatasetInterface.Factory<SimulatedFSDataset> {
@Override
- public SimulatedFSDataset newInstance(DataNode datanode,
+ public SimulatedFSDataset createFSDatasetInterface(DataNode datanode,
DataStorage storage, Configuration conf) throws IOException {
return new SimulatedFSDataset(datanode, storage, conf);
}
@@ -429,7 +427,7 @@ public class SimulatedFSDataset implemen
return map;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
@@ -439,7 +437,7 @@ public class SimulatedFSDataset implemen
binfo.finalizeBlock(b.getBlockPoolId(), b.getNumBytes());
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized void unfinalizeBlock(ExtendedBlock b) {
if (isValidRbw(b)) {
blockMap.remove(b.getLocalBlock());
@@ -485,7 +483,7 @@ public class SimulatedFSDataset implemen
return storage.getNumFailedVolumes();
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized long getLength(ExtendedBlock b) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
@@ -515,7 +513,7 @@ public class SimulatedFSDataset implemen
return r == null? "null": r.toString();
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public Block getStoredBlock(String bpid, long blkid) throws IOException {
final Map<Block, BInfo> map = blockMap.get(bpid);
if (map != null) {
@@ -528,7 +526,7 @@ public class SimulatedFSDataset implemen
return null;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized void invalidate(String bpid, Block[] invalidBlks)
throws IOException {
boolean error = false;
@@ -559,12 +557,12 @@ public class SimulatedFSDataset implemen
return map == null? null: map.get(b.getLocalBlock());
}
- @Override // {@link FsDatasetSpi}
+ @Override // {@link FSDatasetInterface}
public boolean contains(ExtendedBlock block) {
return getBInfo(block) != null;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized boolean isValidBlock(ExtendedBlock b) {
final BInfo binfo = getBInfo(b);
return binfo != null && binfo.isFinalized();
@@ -582,7 +580,7 @@ public class SimulatedFSDataset implemen
return getStorageInfo();
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface append(ExtendedBlock b,
long newGS, long expectedBlockLen) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
@@ -595,7 +593,7 @@ public class SimulatedFSDataset implemen
return binfo;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface recoverAppend(ExtendedBlock b,
long newGS, long expectedBlockLen) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
@@ -613,7 +611,7 @@ public class SimulatedFSDataset implemen
return binfo;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public void recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen)
throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
@@ -630,7 +628,7 @@ public class SimulatedFSDataset implemen
map.put(binfo.theBlock, binfo);
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface recoverRbw(ExtendedBlock b,
long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
@@ -649,13 +647,13 @@ public class SimulatedFSDataset implemen
return binfo;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface createRbw(ExtendedBlock b)
throws IOException {
return createTemporary(b);
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b)
throws IOException {
if (isValidBlock(b)) {
@@ -683,7 +681,7 @@ public class SimulatedFSDataset implemen
return binfo.getIStream();
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized InputStream getBlockInputStream(ExtendedBlock b,
long seekOffset) throws IOException {
InputStream result = getBlockInputStream(b);
@@ -692,13 +690,13 @@ public class SimulatedFSDataset implemen
}
/** Not supported */
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
long ckoff) throws IOException {
throw new IOException("Not supported");
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized LengthInputStream getMetaDataInputStream(ExtendedBlock b
) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
@@ -719,7 +717,7 @@ public class SimulatedFSDataset implemen
// nothing to check for simulated data set
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public synchronized void adjustCrcChannelPosition(ExtendedBlock b,
ReplicaOutputStreams stream,
int checksumSize)
@@ -904,32 +902,32 @@ public class SimulatedFSDataset implemen
binfo.isFinalized()?ReplicaState.FINALIZED : ReplicaState.RBW);
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
long recoveryId,
long newlength) {
return storageId;
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public long getReplicaVisibleLength(ExtendedBlock block) {
return block.getNumBytes();
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public void addBlockPool(String bpid, Configuration conf) {
Map<Block, BInfo> map = new HashMap<Block, BInfo>();
blockMap.put(bpid, map);
storage.addBlockPool(bpid);
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public void shutdownBlockPool(String bpid) {
blockMap.remove(bpid);
storage.removeBlockPool(bpid);
}
- @Override // FsDatasetSpi
+ @Override // FSDatasetInterface
public void deleteBlockPool(String bpid, boolean force) {
return;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Mon Mar 26 21:09:14 2012
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.B
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -78,7 +77,7 @@ public class TestBPOfferService {
private NNHAStatusHeartbeat[] mockHaStatuses = new NNHAStatusHeartbeat[2];
private int heartbeatCounts[] = new int[2];
private DataNode mockDn;
- private FsDatasetSpi<?> mockFSDataset;
+ private FSDatasetInterface<?> mockFSDataset;
@Before
public void setupMocks() throws Exception {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1305598&r1=1305597&r2=1305598&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Mon Mar 26 21:09:14 2012
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.HdfsConfig
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.util.DataChecksum;
@@ -89,12 +88,12 @@ public class TestSimulatedFSDataset exte
public void testFSDatasetFactory() {
final Configuration conf = new Configuration();
- FsDatasetSpi.Factory<?> f = FsDatasetSpi.Factory.getFactory(conf);
+ FSDatasetInterface.Factory<?> f = FSDatasetInterface.Factory.getFactory(conf);
assertEquals(FSDataset.Factory.class, f.getClass());
assertFalse(f.isSimulated());
SimulatedFSDataset.setFactory(conf);
- FsDatasetSpi.Factory<?> s = FsDatasetSpi.Factory.getFactory(conf);
+ FSDatasetInterface.Factory<?> s = FSDatasetInterface.Factory.getFactory(conf);
assertEquals(SimulatedFSDataset.Factory.class, s.getClass());
assertTrue(s.isSimulated());
}