Author: szetszwo
Date: Tue Mar 13 22:52:17 2012
New Revision: 1300392
URL: http://svn.apache.org/viewvc?rev=1300392&view=rev
Log:
HDFS-3082. Clean up FSDatasetInterface and change DataNode.data to package
private.
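This change consolidates the block-metadata accessors: metaFileExists(..) and
getMetaDataLength(..) are removed from FSDatasetInterface, and
getMetaDataInputStream(..) now returns null when the meta file is absent, so
callers make a single dataset call instead of an existence check followed by a
separate open. A minimal caller-side sketch of the new pattern (the local
variable names are illustrative, and getLength() is assumed from the class's
documented role of carrying both the stream and its length):

    // One call replaces the old metaFileExists(..) check plus
    // getMetaDataInputStream(..) open; a null return means "no meta file".
    final MetaDataInputStream metaIn = dataset.getMetaDataInputStream(block);
    if (metaIn != null) {
      // The stream carries its own length, which makes the removed
      // getMetaDataLength(..) accessor unnecessary.
      final long metaLength = metaIn.getLength();
      final DataInputStream checksumIn = new DataInputStream(
          new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
      // ... read the checksum header, as BlockSender does below ...
    }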
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Mar 13 22:52:17 2012
@@ -225,6 +225,9 @@ Release 0.23.3 - UNRELEASED
HDFS-2731. Add command to bootstrap the Standby Node's name directories
from the Active NameNode. (todd)
+ HDFS-3082. Clean up FSDatasetInterface and change DataNode.data to package
+ private. (szetszwo)
+
OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java Tue Mar 13 22:52:17 2012
@@ -29,21 +29,13 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
@@ -51,16 +43,8 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
-import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
-import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
@@ -578,7 +562,7 @@ class BPOfferService {
dn.blockScanner.deleteBlocks(bcmd.getBlockPoolId(), toDelete);
}
// using global fsdataset
- dn.data.invalidate(bcmd.getBlockPoolId(), toDelete);
+ dn.getFSDataset().invalidate(bcmd.getBlockPoolId(), toDelete);
} catch(IOException e) {
dn.checkDiskError();
throw e;
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Tue Mar 13 22:52:17 2012
@@ -219,9 +219,10 @@ class BlockSender implements java.io.Clo
(!is32Bit || length <= Integer.MAX_VALUE);
DataChecksum csum;
- if (!corruptChecksumOk || datanode.data.metaFileExists(block)) {
- checksumIn = new DataInputStream(new BufferedInputStream(datanode.data
- .getMetaDataInputStream(block), HdfsConstants.IO_FILE_BUFFER_SIZE));
+ final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
+ if (!corruptChecksumOk || metaIn != null) {
+ checksumIn = new DataInputStream(
+ new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
// read and handle the common header here. For now just a version
BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
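The rewritten check also closes a time-of-check-to-time-of-use gap: the old
code asked metaFileExists(block) and then opened the stream in a second
dataset call, while the new code opens once and branches on the null result.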
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Mar 13 22:52:17 2012
@@ -231,7 +231,7 @@ public class DataNode extends Configured
volatile boolean shouldRun = true;
private BlockPoolManager blockPoolManager;
- public volatile FSDatasetInterface<? extends FSVolumeInterface> data = null;
+ volatile FSDatasetInterface<? extends FSVolumeInterface> data = null;
private String clusterId = null;
public final static String EMPTY_DEL_HINT = "";
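With the field now package-private, code outside
org.apache.hadoop.hdfs.server.datanode reaches the dataset through an
accessor; the BPOfferService hunk above already switched to
dn.getFSDataset(). A minimal sketch of that accessor on DataNode, assuming it
simply exposes the field:

    // Sketch only: the getter name matches the dn.getFSDataset() call sites
    // in this commit; the body is an assumption.
    public FSDatasetInterface<? extends FSVolumeInterface> getFSDataset() {
      return data;
    }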
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Tue Mar 13 22:52:17 2012
@@ -1037,22 +1037,13 @@ class FSDataset implements FSDatasetInte
}
@Override // FSDatasetInterface
- public boolean metaFileExists(ExtendedBlock b) throws IOException {
- return getMetaFile(b).exists();
- }
-
- @Override // FSDatasetInterface
- public long getMetaDataLength(ExtendedBlock b) throws IOException {
- File checksumFile = getMetaFile(b);
- return checksumFile.length();
- }
-
- @Override // FSDatasetInterface
public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b)
throws IOException {
- File checksumFile = getMetaFile(b);
- return new MetaDataInputStream(new FileInputStream(checksumFile),
- checksumFile.length());
+ final File meta = getMetaFile(b);
+ if (meta == null || !meta.exists()) {
+ return null;
+ }
+ return new MetaDataInputStream(new FileInputStream(meta), meta.length());
}
private final DataNode datanode;
@@ -1213,18 +1204,6 @@ class FSDataset implements FSDatasetInte
return f;
}
- @Override // FSDatasetInterface
- public InputStream getBlockInputStream(ExtendedBlock b)
- throws IOException {
- File f = getBlockFileNoExistsCheck(b);
- try {
- return new FileInputStream(f);
- } catch (FileNotFoundException fnfe) {
- throw new IOException("Block " + b + " is not valid. " +
- "Expected block file at " + f + " does not exist.");
- }
- }
-
/**
* Return the File associated with a block, without first
* checking that it exists. This should be used when the
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java Tue Mar 13 22:52:17 2012
@@ -39,8 +39,8 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.ReflectionUtils;
/**
* This is an interface for the underlying storage that stores blocks for
@@ -124,14 +124,6 @@ public interface FSDatasetInterface<V ex
File diskMetaFile, FSVolumeInterface vol);
/**
- * Returns the length of the metadata file of the specified block
- * @param b - the block for which the metadata length is desired
- * @return the length of the metadata file for the specified block.
- * @throws IOException
- */
- public long getMetaDataLength(ExtendedBlock b) throws IOException;
-
- /**
* This class provides the input stream and length of the metadata
* of a block
*
@@ -149,22 +141,13 @@ public interface FSDatasetInterface<V ex
}
/**
- * Returns metaData of block b as an input stream (and its length)
- * @param b - the block
- * @return the metadata input stream;
- * @throws IOException
- */
- public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b)
- throws IOException;
-
- /**
- * Does the meta file exist for this block?
* @param b - the block
- * @return true of the metafile for specified block exits
+ * @return a stream if the meta-data of the block exists;
+ * otherwise, return null.
* @throws IOException
*/
- public boolean metaFileExists(ExtendedBlock b) throws IOException;
-
+ public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b
+ ) throws IOException;
/**
* Returns the specified block's on-disk length (excluding metadata)
@@ -191,16 +174,7 @@ public interface FSDatasetInterface<V ex
/**
* @return the generation stamp stored with the block.
*/
- public Block getStoredBlock(String bpid, long blkid)
- throws IOException;
-
- /**
- * Returns an input stream to read the contents of the specified block
- * @param b
- * @return an input stream to read the contents of the specified block
- * @throws IOException
- */
- public InputStream getBlockInputStream(ExtendedBlock b) throws IOException;
+ public Block getStoredBlock(String bpid, long blkid) throws IOException;
/**
* Returns an input stream at specified offset of the specified block
@@ -408,11 +382,6 @@ public interface FSDatasetInterface<V ex
*/
public void checkDataDir() throws DiskErrorException;
- /**
- * Stringifies the name of the storage
- */
- public String toString();
-
/**
* Shutdown the FSDataset
*/
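After the removals above, the metadata surface of FSDatasetInterface narrows
to a single method, and block reads go through the offset-based accessor. A
sketch of the surviving declarations (the first signature is copied from the
hunk above; the parameter name of the offset variant is an assumption):

    /** @return a stream over the block's meta-data if it exists;
     *  otherwise, return null. */
    public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b
        ) throws IOException;

    /** The no-offset getBlockInputStream(ExtendedBlock) overload is removed;
     *  callers use the offset form instead. */
    public InputStream getBlockInputStream(ExtendedBlock b, long seekOffset)
        throws IOException;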
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Mar 13 22:52:17 2012
@@ -1744,7 +1744,7 @@ public class MiniDFSCluster {
// If datanode dataset is not initialized then wait
for (DataNodeProperties dn : dataNodes) {
- if (dn.datanode.data == null) {
+ if (DataNodeTestUtils.getFSDataset(dn.datanode) == null) {
return true;
}
}
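Tests stop reading DataNode.data directly and call
DataNodeTestUtils.getFSDataset(dn) instead. DataNodeTestUtils itself is not
among the files shown in this excerpt, so the following is only a plausible
sketch of that helper:

    // Assumed shape of the test-side accessor: it lives in the datanode
    // package, so it can reach the now package-private dataset through the
    // DataNode getter.
    public static FSDatasetInterface<?> getFSDataset(DataNode dn) {
      return dn.getFSDataset();
    }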
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java Tue Mar 13 22:52:17 2012
@@ -272,8 +272,8 @@ public class TestFileAppend3 extends jun
}
for(DatanodeInfo datanodeinfo : lb.getLocations()) {
final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
- final Block metainfo = dn.data.getStoredBlock(blk.getBlockPoolId(),
- blk.getBlockId());
+ final Block metainfo = DataNodeTestUtils.getFSDataset(dn).getStoredBlock(
+ blk.getBlockPoolId(), blk.getBlockId());
assertEquals(size, metainfo.getNumBytes());
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Tue Mar 13 22:52:17 2012
@@ -846,7 +846,8 @@ public class TestFileCreation extends ju
for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
ExtendedBlock blk = locatedblock.getBlock();
- Block b = datanode.data.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
+ Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(
+ blk.getBlockPoolId(), blk.getBlockId());
final File blockfile = DataNodeTestUtils.getFile(datanode,
blk.getBlockPoolId(), b.getBlockId());
System.out.println("blockfile=" + blockfile);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java Tue Mar 13 22:52:17 2012
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.E
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.TestInterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -117,8 +118,8 @@ public class TestLeaseRecovery extends j
dfs.dfs.getNamenode(), filestr).getBlock();
long currentGS = lastblock.getGenerationStamp();
for(int i = 0; i < REPLICATION_NUM; i++) {
- updatedmetainfo[i] = datanodes[i].data.getStoredBlock(lastblock
- .getBlockPoolId(), lastblock.getBlockId());
+ updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
+ lastblock.getBlockPoolId(), lastblock.getBlockId());
assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Tue Mar 13 22:52:17 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.L
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -211,7 +212,8 @@ public class TestShortCircuitLocalRead {
//This should succeed
BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token);
- Assert.assertEquals(dn.data.getBlockLocalPathInfo(blk).getBlockPath(),
+ Assert.assertEquals(
+ DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
blpi.getBlockPath());
// Now try with a not allowed user.
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Tue Mar 13 22:52:17 2012
@@ -667,9 +667,8 @@ public class SimulatedFSDataset
return binfo;
}
- @Override // FSDatasetInterface
- public synchronized InputStream getBlockInputStream(ExtendedBlock b)
- throws IOException {
+ synchronized InputStream getBlockInputStream(ExtendedBlock b
+ ) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
@@ -694,15 +693,9 @@ public class SimulatedFSDataset
throw new IOException("Not supported");
}
- /**
- * Returns metaData of block b as an input stream
- * @param b - the block for which the metadata is desired
- * @return metaData of block b as an input stream
- * @throws IOException - block does not exist or problems accessing
- * the meta file
- */
- private synchronized InputStream getMetaDataInStream(ExtendedBlock b)
- throws IOException {
+ @Override // FSDatasetInterface
+ public synchronized MetaDataInputStream getMetaDataInputStream(ExtendedBlock b
+ ) throws IOException {
final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
BInfo binfo = map.get(b.getLocalBlock());
if (binfo == null) {
@@ -712,40 +705,11 @@ public class SimulatedFSDataset
throw new IOException("Block " + b +
" is being written, its meta cannot be read");
}
- return binfo.getMetaIStream();
- }
-
- @Override // FSDatasetInterface
- public synchronized long getMetaDataLength(ExtendedBlock b)
- throws IOException {
- final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
- BInfo binfo = map.get(b.getLocalBlock());
- if (binfo == null) {
- throw new IOException("No such Block " + b );
- }
- if (!binfo.finalized) {
- throw new IOException("Block " + b +
- " is being written, its metalength cannot be read");
- }
- return binfo.getMetaIStream().getLength();
- }
-
- @Override // FSDatasetInterface
- public MetaDataInputStream getMetaDataInputStream(ExtendedBlock b)
- throws IOException {
- return new MetaDataInputStream(getMetaDataInStream(b),
- getMetaDataLength(b));
- }
-
- @Override // FSDatasetInterface
- public synchronized boolean metaFileExists(ExtendedBlock b) throws IOException {
- if (!isValidBlock(b)) {
- throw new IOException("Block " + b +
- " is valid, and cannot be written to.");
- }
- return true; // crc exists for all valid blocks
+ final SimulatedInputStream sin = binfo.getMetaIStream();
+ return new MetaDataInputStream(sin, sin.getLength());
}
+ @Override
public void checkDataDir() throws DiskErrorException {
// nothing to check for simulated data set
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1300392&r1=1300391&r2=1300392&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Tue Mar 13 22:52:17 2012
@@ -102,7 +102,7 @@ public class TestSimulatedFSDataset exte
final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
try {
- assertFalse(fsdataset.metaFileExists(b));
+ assertTrue(fsdataset.getMetaDataInputStream(b) == null);
assertTrue("Expected an IO exception", false);
} catch (IOException e) {
// ok - as expected