http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
new file mode 100644
index 0000000..47445be
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+
+/**
+ * Feature for file with striped blocks
+ */
+class FileWithStripedBlocksFeature implements INode.Feature {
+  private BlockInfoStriped[] blocks;
+
+  FileWithStripedBlocksFeature() {
+    blocks = new BlockInfoStriped[0];
+  }
+
+  FileWithStripedBlocksFeature(BlockInfoStriped[] blocks) {
+    Preconditions.checkArgument(blocks != null);
+    this.blocks = blocks;
+  }
+
+  BlockInfoStriped[] getBlocks() {
+    return this.blocks;
+  }
+
+  void setBlock(int index, BlockInfoStriped blk) {
+    blocks[index] = blk;
+  }
+
+  BlockInfoStriped getLastBlock() {
+    return blocks == null || blocks.length == 0 ?
+        null : blocks[blocks.length - 1];
+  }
+
+  int numBlocks() {
+    return blocks == null ? 0 : blocks.length;
+  }
+
+  void updateBlockCollection(INodeFile file) {
+    if (blocks != null) {
+      for (BlockInfoStriped blk : blocks) {
+        blk.setBlockCollection(file);
+      }
+    }
+  }
+
+  private void setBlocks(BlockInfoStriped[] blocks) {
+    this.blocks = blocks;
+  }
+
+  void addBlock(BlockInfoStriped newBlock) {
+    if (this.blocks == null) {
+      this.setBlocks(new BlockInfoStriped[]{newBlock});
+    } else {
+      int size = this.blocks.length;
+      BlockInfoStriped[] newlist = new BlockInfoStriped[size + 1];
+      System.arraycopy(this.blocks, 0, newlist, 0, size);
+      newlist[size] = newBlock;
+      this.setBlocks(newlist);
+    }
+  }
+
+  boolean removeLastBlock(Block oldblock) {
+    if (blocks == null || blocks.length == 0) {
+      return false;
+    }
+    int newSize = blocks.length - 1;
+    if (!blocks[newSize].equals(oldblock)) {
+      return false;
+    }
+
+    //copy to a new list
+    BlockInfoStriped[] newlist = new BlockInfoStriped[newSize];
+    System.arraycopy(blocks, 0, newlist, 0, newSize);
+    setBlocks(newlist);
+    return true;
+  }
+
+  void truncateStripedBlocks(int n) {
+    final BlockInfoStriped[] newBlocks;
+    if (n == 0) {
+      newBlocks = new BlockInfoStriped[0];
+    } else {
+      newBlocks = new BlockInfoStriped[n];
+      System.arraycopy(getBlocks(), 0, newBlocks, 0, n);
+    }
+    // set new blocks
+    setBlocks(newBlocks);
+  }
+
+  void clear() {
+    this.blocks = null;
+  }
+}
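
The new feature class manages its striped blocks with a copy-on-write array: every append or removal allocates a fresh array rather than mutating in place. A minimal, self-contained sketch of that pattern (with String standing in for BlockInfoStriped; none of these names are from the patch) looks like this:

    // Sketch of the copy-on-write array pattern behind addBlock()/removeLastBlock().
    // "String" stands in for BlockInfoStriped; illustrative only, not the HDFS API.
    class CopyOnWriteList {
      private String[] items = new String[0];

      void add(String item) {
        // allocate a new array one slot larger and copy the old contents over
        String[] newList = new String[items.length + 1];
        System.arraycopy(items, 0, newList, 0, items.length);
        newList[items.length] = item;
        items = newList;
      }

      boolean removeLast(String expected) {
        // only remove when the tail matches, mirroring removeLastBlock(oldblock)
        if (items.length == 0 || !items[items.length - 1].equals(expected)) {
          return false;
        }
        String[] newList = new String[items.length - 1];
        System.arraycopy(items, 0, newList, 0, newList.length);
        items = newList;
        return true;
      }
    }

Block lists change rarely relative to how often they are read, so rebuilding the whole array on each change keeps the accessors simple; the same trade-off already exists in the contiguous-block code in INodeFile below.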
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 1858e0a..640fc57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION;
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
 
@@ -37,12 +38,12 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
@@ -174,6 +175,31 @@ public class INodeFile extends INodeWithAdditionalFields
         && getXAttrFeature() == other.getXAttrFeature();
   }
 
+  /* Start of StripedBlock Feature */
+
+  public final FileWithStripedBlocksFeature getStripedBlocksFeature() {
+    return getFeature(FileWithStripedBlocksFeature.class);
+  }
+
+  public FileWithStripedBlocksFeature addStripedBlocksFeature() {
+    assert blocks == null || blocks.length == 0:
+        "The file contains contiguous blocks";
+    assert !isWithStripedBlocks();
+    this.setFileReplication((short) 0);
+    FileWithStripedBlocksFeature sb = new FileWithStripedBlocksFeature();
+    addFeature(sb);
+    return sb;
+  }
+
+  public boolean isWithStripedBlocks() {
+    return getStripedBlocksFeature() != null;
+  }
+
+  /** Used to make sure there is no contiguous block related info */
+  private boolean hasNoContiguousBlock() {
+    return (blocks == null || blocks.length == 0) && getFileReplication() == 0;
+  }
+
   /* Start of Under-Construction Feature */
 
   /**
@@ -208,7 +234,7 @@ public class INodeFile extends INodeWithAdditionalFields
         "file is no longer under construction");
     FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
     if (uc != null) {
-      assertAllBlocksComplete();
+      assertAllBlocksComplete(getBlocks());
       removeFeature(uc);
       this.setModificationTime(mtime);
     }
@@ -216,37 +242,56 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   /** Assert all blocks are complete. */
-  private void assertAllBlocksComplete() {
-    if (blocks == null) {
+  private void assertAllBlocksComplete(BlockInfo[] blks) {
+    if (blks == null) {
       return;
     }
-    for (int i = 0; i < blocks.length; i++) {
-      Preconditions.checkState(blocks[i].isComplete(), "Failed to finalize"
+    for (int i = 0; i < blks.length; i++) {
+      Preconditions.checkState(blks[i].isComplete(), "Failed to finalize"
           + " %s %s since blocks[%s] is non-complete, where blocks=%s.",
-          getClass().getSimpleName(), this, i, Arrays.asList(blocks));
+          getClass().getSimpleName(), this, i, Arrays.asList(blks));
     }
   }
 
+  /**
+   * Instead of adding a new block, this function is usually used while loading
+   * fsimage or converting the last block to UC/Complete.
+   */
   @Override // BlockCollection
-  public void setBlock(int index, BlockInfoContiguous blk) {
-    this.blocks[index] = blk;
+  public void setBlock(int index, BlockInfo blk) {
+    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
+    if (sb == null) {
+      assert blk instanceof BlockInfoContiguous;
+      this.blocks[index] = (BlockInfoContiguous) blk;
+    } else {
+      assert blk instanceof BlockInfoStriped;
+      assert hasNoContiguousBlock();
+      sb.setBlock(index, (BlockInfoStriped) blk);
+    }
   }
 
   @Override // BlockCollection, the file should be under construction
-  public BlockInfoContiguousUnderConstruction setLastBlock(
-      BlockInfoContiguous lastBlock, DatanodeStorageInfo[] locations)
-      throws IOException {
+  public void convertLastBlockToUC(BlockInfo lastBlock,
+      DatanodeStorageInfo[] locations) throws IOException {
     Preconditions.checkState(isUnderConstruction(),
         "file is no longer under construction");
-
     if (numBlocks() == 0) {
       throw new IOException("Failed to set last block: File is empty.");
    }
-    BlockInfoContiguousUnderConstruction ucBlock =
-        lastBlock.convertToBlockUnderConstruction(
-            BlockUCState.UNDER_CONSTRUCTION, locations);
+
+    final BlockInfo ucBlock;
+    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
+    if (sb == null) {
+      assert lastBlock instanceof BlockInfoContiguous;
+      ucBlock = ((BlockInfoContiguous) lastBlock)
+          .convertToBlockUnderConstruction(UNDER_CONSTRUCTION, locations);
+    } else {
+      assert hasNoContiguousBlock();
+      assert lastBlock instanceof BlockInfoStriped;
+      ucBlock = ((BlockInfoStriped) lastBlock)
+          .convertToBlockUnderConstruction(UNDER_CONSTRUCTION, locations);
+    }
     setBlock(numBlocks() - 1, ucBlock);
-    return ucBlock;
   }
 
   /**
@@ -256,19 +301,25 @@ public class INodeFile extends INodeWithAdditionalFields
   boolean removeLastBlock(Block oldblock) {
     Preconditions.checkState(isUnderConstruction(),
         "file is no longer under construction");
-    if (blocks == null || blocks.length == 0) {
-      return false;
-    }
-    int size_1 = blocks.length - 1;
-    if (!blocks[size_1].equals(oldblock)) {
-      return false;
-    }
+    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
+    if (sb == null) {
+      if (blocks == null || blocks.length == 0) {
+        return false;
+      }
+      int size_1 = blocks.length - 1;
+      if (!blocks[size_1].equals(oldblock)) {
+        return false;
+      }
 
-    //copy to a new list
-    BlockInfoContiguous[] newlist = new BlockInfoContiguous[size_1];
-    System.arraycopy(blocks, 0, newlist, 0, size_1);
-    setBlocks(newlist);
-    return true;
+      //copy to a new list
+      BlockInfoContiguous[] newlist = new BlockInfoContiguous[size_1];
+      System.arraycopy(blocks, 0, newlist, 0, size_1);
+      setContiguousBlocks(newlist);
+      return true;
+    } else {
+      assert hasNoContiguousBlock();
+      return sb.removeLastBlock(oldblock);
+    }
   }
 
   /* End of Under-Construction Feature */
@@ -369,13 +420,15 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   /** Set the replication factor of this file. */
-  public final void setFileReplication(short replication) {
+  private void setFileReplication(short replication) {
     header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
   }
 
   /** Set the replication factor of this file. */
   public final INodeFile setFileReplication(short replication,
       int latestSnapshotId) throws QuotaExceededException {
+    Preconditions.checkState(!isWithStripedBlocks(),
+        "Cannot set replication to a file with striped blocks");
     recordModification(latestSnapshotId);
     setFileReplication(replication);
     return this;
@@ -413,42 +466,57 @@ public class INodeFile extends INodeWithAdditionalFields
     setStoragePolicyID(storagePolicyId);
   }
 
-  @Override
+  @Override // INodeFileAttributes
   public long getHeaderLong() {
     return header;
   }
 
-  /** @return the storagespace required for a full block. */
-  final long getPreferredBlockStoragespace() {
-    return getPreferredBlockSize() * getBlockReplication();
+  /** @return the blocks of the file. */
+  @Override // BlockCollection
+  public BlockInfo[] getBlocks() {
+    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
+    if (sb != null) {
+      assert hasNoContiguousBlock();
+      return sb.getBlocks();
+    } else {
+      return this.blocks;
+    }
   }
 
-  /** @return the blocks of the file. */
-  @Override
-  public BlockInfoContiguous[] getBlocks() {
+  /** Used by snapshot diff */
+  public BlockInfoContiguous[] getContiguousBlocks() {
     return this.blocks;
   }
 
   /** @return blocks of the file corresponding to the snapshot. */
-  public BlockInfoContiguous[] getBlocks(int snapshot) {
-    if(snapshot == CURRENT_STATE_ID || getDiffs() == null)
+  public BlockInfo[] getBlocks(int snapshot) {
+    if (snapshot == CURRENT_STATE_ID || getDiffs() == null) {
      return getBlocks();
+    }
+    // find blocks stored in snapshot diffs (for truncate)
     FileDiff diff = getDiffs().getDiffById(snapshot);
-    BlockInfoContiguous[] snapshotBlocks =
-        diff == null ? getBlocks() : diff.getBlocks();
-    if(snapshotBlocks != null)
+    // note that currently FileDiff can only store contiguous blocks
+    BlockInfo[] snapshotBlocks = diff == null ? getBlocks() : diff.getBlocks();
+    if (snapshotBlocks != null) {
       return snapshotBlocks;
+    }
     // Blocks are not in the current snapshot
     // Find next snapshot with blocks present or return current file blocks
     snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
     return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
   }
 
-  void updateBlockCollection() {
-    if (blocks != null) {
+  /** Used during concat to update the BlockCollection for each block */
+  private void updateBlockCollection() {
+    if (blocks != null && blocks.length > 0) {
       for(BlockInfoContiguous b : blocks) {
         b.setBlockCollection(this);
       }
+    } else {
+      FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
+      if (sb != null) {
+        sb.updateBlockCollection(this);
+      }
     }
   }
 
@@ -471,33 +539,33 @@ public class INodeFile extends INodeWithAdditionalFields
       size += in.blocks.length;
     }
 
-    setBlocks(newlist);
+    setContiguousBlocks(newlist);
     updateBlockCollection();
   }
 
   /**
-   * add a block to the block list
+   * add a contiguous block to the block list
    */
   void addBlock(BlockInfoContiguous newblock) {
     if (this.blocks == null) {
-      this.setBlocks(new BlockInfoContiguous[]{newblock});
+      this.setContiguousBlocks(new BlockInfoContiguous[]{newblock});
     } else {
       int size = this.blocks.length;
       BlockInfoContiguous[] newlist = new BlockInfoContiguous[size + 1];
       System.arraycopy(this.blocks, 0, newlist, 0, size);
       newlist[size] = newblock;
-      this.setBlocks(newlist);
+      this.setContiguousBlocks(newlist);
     }
   }
 
   /** Set the blocks. */
-  public void setBlocks(BlockInfoContiguous[] blocks) {
+  public void setContiguousBlocks(BlockInfoContiguous[] blocks) {
     this.blocks = blocks;
   }
 
   @Override
-  public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps, final int snapshot,
-      int priorSnapshotId,
+  public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps,
+      final int snapshot, int priorSnapshotId,
       final BlocksMapUpdateInfo collectedBlocks,
       final List<INode> removedINodes) {
     FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
@@ -527,13 +595,19 @@ public class INodeFile extends INodeWithAdditionalFields
   @Override
   public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
       BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
-    if (blocks != null && collectedBlocks != null) {
-      for (BlockInfoContiguous blk : blocks) {
+    BlockInfo[] blks = getBlocks();
+    if (blks != null && collectedBlocks != null) {
+      for (BlockInfo blk : blks) {
        collectedBlocks.addDeleteBlock(blk);
         blk.setBlockCollection(null);
       }
     }
-    setBlocks(null);
+    setContiguousBlocks(null);
+
+    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
+    if (sb != null) {
+      sb.clear();
+    }
     if (getAclFeature() != null) {
       AclStorage.removeAclFeature(getAclFeature());
     }
@@ -725,7 +799,7 @@ public class INodeFile extends INodeWithAdditionalFields
       size += block.getNumBytes();
     }
     // check if the last block is under construction
-    BlockInfoContiguous lastBlock = getLastBlock();
+    BlockInfo lastBlock = getLastBlock();
     if(lastBlock != null &&
         lastBlock instanceof BlockInfoContiguousUnderConstruction) {
       size += getPreferredBlockSize() - lastBlock.getNumBytes();
@@ -733,15 +807,6 @@ public class INodeFile extends INodeWithAdditionalFields
     return size;
   }
 
-  public final long storagespaceConsumed(int lastSnapshotId) {
-    if (lastSnapshotId != CURRENT_STATE_ID) {
-      return computeFileSize(lastSnapshotId)
-          * getFileReplication(lastSnapshotId);
-    } else {
-      return storagespaceConsumed();
-    }
-  }
-
   public final short getReplication(int lastSnapshotId) {
     if (lastSnapshotId != CURRENT_STATE_ID) {
       return getFileReplication(lastSnapshotId);
@@ -761,21 +826,33 @@ public class INodeFile extends INodeWithAdditionalFields
   /**
    * Return the penultimate allocated block for this file.
    */
-  BlockInfoContiguous getPenultimateBlock() {
-    if (blocks == null || blocks.length <= 1) {
-      return null;
-    }
-    return blocks[blocks.length - 2];
+  BlockInfo getPenultimateBlock() {
+    BlockInfo[] blks = getBlocks();
+    return (blks == null || blks.length <= 1) ?
+        null : blks[blks.length - 2];
   }
 
   @Override
-  public BlockInfoContiguous getLastBlock() {
-    return blocks == null || blocks.length == 0? null: blocks[blocks.length-1];
+  public BlockInfo getLastBlock() {
+    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
+    if (sb == null) {
+      return blocks == null || blocks.length == 0 ?
+          null : blocks[blocks.length - 1];
+    } else {
+      assert hasNoContiguousBlock();
+      return sb.getLastBlock();
+    }
   }
 
   @Override
   public int numBlocks() {
-    return blocks == null ? 0 : blocks.length;
+    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
+    if (sb == null) {
+      return blocks == null ? 0 : blocks.length;
+    } else {
+      assert hasNoContiguousBlock();
+      return sb.numBlocks();
+    }
   }
 
   @VisibleForTesting
@@ -787,6 +864,7 @@ public class INodeFile extends INodeWithAdditionalFields
     // only compare the first block
     out.print(", blocks=");
     out.print(blocks == null || blocks.length == 0? null: blocks[0]);
+    // TODO print striped blocks
     out.println();
   }
 
@@ -796,9 +874,10 @@ public class INodeFile extends INodeWithAdditionalFields
    */
   public long collectBlocksBeyondMax(final long max,
       final BlocksMapUpdateInfo collectedBlocks) {
-    final BlockInfoContiguous[] oldBlocks = getBlocks();
-    if (oldBlocks == null)
+    final BlockInfo[] oldBlocks = getBlocks();
+    if (oldBlocks == null) {
       return 0;
+    }
     // find the minimum n such that the size of the first n blocks > max
     int n = 0;
     long size = 0;
@@ -859,21 +938,36 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   void truncateBlocksTo(int n) {
+    FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
+    if (sb == null) {
+      truncateContiguousBlocks(n);
+    } else {
+      sb.truncateStripedBlocks(n);
+    }
+  }
+
+  private void truncateContiguousBlocks(int n) {
     final BlockInfoContiguous[] newBlocks;
     if (n == 0) {
       newBlocks = BlockInfoContiguous.EMPTY_ARRAY;
     } else {
       newBlocks = new BlockInfoContiguous[n];
-      System.arraycopy(getBlocks(), 0, newBlocks, 0, n);
+      System.arraycopy(blocks, 0, newBlocks, 0, n);
     }
     // set new blocks
-    setBlocks(newBlocks);
+    setContiguousBlocks(newBlocks);
  }
 
+  /**
+   * This function is only called when block list is stored in snapshot
+   * diffs. Note that this can only happen when truncation happens with
+   * snapshots. Since we do not support truncation with striped blocks,
+   * we only need to handle contiguous blocks here.
+   */
   public void collectBlocksBeyondSnapshot(BlockInfoContiguous[] snapshotBlocks,
                                           BlocksMapUpdateInfo collectedBlocks) {
-    BlockInfoContiguous[] oldBlocks = getBlocks();
-    if(snapshotBlocks == null || oldBlocks == null)
+    BlockInfoContiguous[] oldBlocks = this.blocks;
+    if (snapshotBlocks == null || oldBlocks == null)
       return;
     // Skip blocks in common between the file and the snapshot
     int n = 0;
@@ -881,7 +975,7 @@ public class INodeFile extends INodeWithAdditionalFields
            oldBlocks[n] == snapshotBlocks[n]) {
       n++;
     }
-    truncateBlocksTo(n);
+    truncateContiguousBlocks(n);
     // Collect the remaining blocks of the file
     while(n < oldBlocks.length) {
       collectedBlocks.addDeleteBlock(oldBlocks[n++]);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 9ce8ebc..6cd11b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.util.Daemon;
 
@@ -120,10 +120,10 @@ public class LeaseManager {
       } catch (UnresolvedLinkException e) {
         throw new AssertionError("Lease files should reside on this FS");
       }
-      BlockInfoContiguous[] blocks = cons.getBlocks();
+      BlockInfo[] blocks = cons.getBlocks();
       if(blocks == null)
         continue;
-      for(BlockInfoContiguous b : blocks) {
+      for(BlockInfo b : blocks) {
         if(!b.isComplete())
           numUCBlocks++;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 0b3ed88..92e9f08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactor
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
@@ -235,8 +236,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     //get blockInfo
     Block block = new Block(Block.getBlockId(blockId));
     //find which file this block belongs to
-    BlockInfoContiguous blockInfo = namenode.getNamesystem()
-        .getStoredBlock(block);
+    BlockInfo blockInfo = namenode.getNamesystem().getStoredBlock(block);
     if(blockInfo == null) {
       out.println("Block "+ blockId +" " + NONEXISTENT_STATUS);
       LOG.warn("Block "+ blockId + " " + NONEXISTENT_STATUS);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index 3442e7b..4695c3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.util.RwLock;
@@ -45,5 +46,5 @@ public interface Namesystem extends RwLock, SafeMode {
 
   public void checkOperation(OperationCategory read) throws StandbyException;
 
-  public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC);
+  public boolean isInSnapshot(BlockCollection bc);
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
index 87b370a..74baec5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
@@ -239,15 +239,16 @@ public class FSImageFormatPBSnapshot {
         FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,
             pbf.getFileSize());
         List<BlockProto> bpl = pbf.getBlocksList();
-        // TODO: also persist striped blocks
+        // in file diff there can only be contiguous blocks
         BlockInfoContiguous[] blocks = new BlockInfoContiguous[bpl.size()];
         for(int j = 0, e = bpl.size(); j < e; ++j) {
           Block blk = PBHelper.convert(bpl.get(j));
           BlockInfoContiguous storedBlock =
               (BlockInfoContiguous) fsn.getBlockManager().getStoredBlock(blk);
           if(storedBlock == null) {
-            storedBlock = fsn.getBlockManager().addBlockCollection(
-                new BlockInfoContiguous(blk, copy.getFileReplication()), file);
+            storedBlock = (BlockInfoContiguous) fsn.getBlockManager()
+                .addBlockCollection(new BlockInfoContiguous(blk,
+                    copy.getFileReplication()), file);
           }
           blocks[j] = storedBlock;
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 5c9e121..a1263c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -54,8 +54,11 @@ public class FileDiffList extends
       INodeFileAttributes snapshotCopy, boolean withBlocks) {
     final FileDiff diff =
         super.saveSelf2Snapshot(latestSnapshotId, iNodeFile, snapshotCopy);
-    if(withBlocks)  // Store blocks if this is the first update
-      diff.setBlocks(iNodeFile.getBlocks());
+    if (withBlocks) {  // Store blocks if this is the first update
+      BlockInfoContiguous[] blks = iNodeFile.getContiguousBlocks();
+      assert blks != null;
+      diff.setBlocks(blks);
+    }
   }
 
   public BlockInfoContiguous[] findEarlierSnapshotBlocks(int snapshotId) {
@@ -118,7 +121,7 @@ public class FileDiffList extends
         (earlierDiff == null ? new BlockInfoContiguous[]{}
             : earlierDiff.getBlocks());
     // Find later snapshot (or file itself) with blocks
     BlockInfoContiguous[] laterBlocks = findLaterSnapshotBlocks(removed.getSnapshotId());
-    laterBlocks = (laterBlocks==null) ? file.getBlocks() : laterBlocks;
+    laterBlocks = (laterBlocks == null) ? file.getContiguousBlocks() : laterBlocks;
     // Skip blocks, which belong to either the earlier or the later lists
     int i = 0;
     for(; i < removedBlocks.length; i++) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
index 3bd1d91..b6fd033 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
@@ -91,6 +91,10 @@ message INodeSection {
     optional string clientMachine = 2;
   }
 
+  message StripedBlocksFeature {
+    repeated StripedBlockProto blocks = 1;
+  }
+
   message AclFeatureProto {
     /**
      * An ACL entry is represented by a 32-bit integer in Big Endian
@@ -139,6 +143,7 @@ message INodeSection {
     optional AclFeatureProto acl = 8;
     optional XAttrFeatureProto xAttrs = 9;
     optional uint32 storagePolicyID = 10;
+    optional StripedBlocksFeature stripedBlocks = 11;
   }
 
   message QuotaByStorageTypeEntryProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 7d94f04..58dbac5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -491,6 +491,16 @@ message BlockProto {
 }
 
 /**
+ * Striped block information. Besides the basic information for a block,
+ * it also contains the number of data/parity blocks.
+ */
+message StripedBlockProto {
+  required BlockProto block = 1;
+  optional uint32 dataBlockNum = 2;
+  optional uint32 parityBlockNum = 3;
+}
+
+/**
 * Block and datanodes where is it located
 */
 message BlockWithLocationsProto {
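
For the new StripedBlockProto message, the standard protoc-generated Java API would expose a builder with one setter per field. The following is a hypothetical usage sketch, assuming the usual HdfsProtos outer class and the builder methods protoc generates for hdfs.proto; it is not code from the patch:

    // Hypothetical use of the protoc-generated API for StripedBlockProto.
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;

    public class StripedBlockProtoExample {
      public static void main(String[] args) {
        // BlockProto carries the basic block identity
        BlockProto block = BlockProto.newBuilder()
            .setBlockId(1L)
            .setGenStamp(1001L)
            .setNumBytes(0L)
            .build();
        // a (6, 3) schema: six data blocks plus three parity blocks
        StripedBlockProto striped = StripedBlockProto.newBuilder()
            .setBlock(block)
            .setDataBlockNum(6)
            .setParityBlockNum(3)
            .build();
        System.out.println(striped);
      }
    }

Making dataBlockNum and parityBlockNum optional leaves room for a reader to fall back to a default erasure-coding schema when the counts are unset.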
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 6d3bc1e..3eba280 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -1563,7 +1564,7 @@ public class DFSTestUtil {
   public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
       ExtendedBlock blk) {
     FSNamesystem fsn = nn.getNamesystem();
-    BlockInfoContiguous storedBlock = fsn.getStoredBlock(blk.getLocalBlock());
+    BlockInfo storedBlock = fsn.getStoredBlock(blk.getLocalBlock());
     assertTrue("Block " + blk + " should be under construction, " +
         "got: " + storedBlock,
         storedBlock instanceof BlockInfoContiguousUnderConstruction);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index fd28ded..6cd0d1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1240,8 +1240,8 @@ public class TestReplicationPolicy {
         (DatanodeStorageInfo.AddBlockResult.ADDED);
     ucBlock.addStorage(storage, ucBlock);
 
-    when(mbc.setLastBlock((BlockInfoContiguous) any(), (DatanodeStorageInfo[]) any()))
-    .thenReturn(ucBlock);
+    BlockInfo lastBlk = mbc.getLastBlock();
+    when(mbc.getLastBlock()).thenReturn(lastBlk, ucBlock);
 
     bm.convertLastBlockToUnderConstruction(mbc, 0L);
 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlock.java
index a417c3d..301ee25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlock.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.junit.After;
 import org.junit.Before;
@@ -87,21 +87,21 @@ public class TestAddBlock {
 
       // check file1
       INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
-      BlockInfoContiguous[] file1Blocks = file1Node.getBlocks();
+      BlockInfo[] file1Blocks = file1Node.getBlocks();
       assertEquals(1, file1Blocks.length);
       assertEquals(BLOCKSIZE - 1, file1Blocks[0].getNumBytes());
       assertEquals(BlockUCState.COMPLETE, file1Blocks[0].getBlockUCState());
 
       // check file2
       INodeFile file2Node = fsdir.getINode4Write(file2.toString()).asFile();
-      BlockInfoContiguous[] file2Blocks = file2Node.getBlocks();
+      BlockInfo[] file2Blocks = file2Node.getBlocks();
       assertEquals(1, file2Blocks.length);
       assertEquals(BLOCKSIZE, file2Blocks[0].getNumBytes());
       assertEquals(BlockUCState.COMPLETE, file2Blocks[0].getBlockUCState());
 
       // check file3
       INodeFile file3Node = fsdir.getINode4Write(file3.toString()).asFile();
-      BlockInfoContiguous[] file3Blocks = file3Node.getBlocks();
+      BlockInfo[] file3Blocks = file3Node.getBlocks();
       assertEquals(2, file3Blocks.length);
       assertEquals(BLOCKSIZE, file3Blocks[0].getNumBytes());
       assertEquals(BlockUCState.COMPLETE, file3Blocks[0].getBlockUCState());
@@ -110,7 +110,7 @@ public class TestAddBlock {
 
       // check file4
       INodeFile file4Node = fsdir.getINode4Write(file4.toString()).asFile();
-      BlockInfoContiguous[] file4Blocks = file4Node.getBlocks();
+      BlockInfo[] file4Blocks = file4Node.getBlocks();
       assertEquals(2, file4Blocks.length);
       assertEquals(BLOCKSIZE, file4Blocks[0].getNumBytes());
       assertEquals(BlockUCState.COMPLETE, file4Blocks[0].getBlockUCState());
@@ -141,7 +141,7 @@ public class TestAddBlock {
 
       FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
       INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
-      BlockInfoContiguous[] fileBlocks = fileNode.getBlocks();
+      BlockInfo[] fileBlocks = fileNode.getBlocks();
       assertEquals(2, fileBlocks.length);
       assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
       assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
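
The TestReplicationPolicy change above replaces a stub of the removed setLastBlock() with Mockito's consecutive stubbing on getLastBlock(): the first call yields the original last block and later calls yield the under-construction replacement. A self-contained illustration of that Mockito behavior (List is just a convenient type to mock here):

    // Minimal demonstration of Mockito consecutive stubbing:
    // thenReturn(a, b) returns a once, then b for every later call.
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;
    import java.util.List;

    public class ConsecutiveStubbingExample {
      public static void main(String[] args) {
        @SuppressWarnings("unchecked")
        List<String> list = mock(List.class);
        when(list.get(0)).thenReturn("original", "underConstruction");
        System.out.println(list.get(0)); // original
        System.out.println(list.get(0)); // underConstruction (and thereafter)
      }
    }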
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
index 06dfade..a2ef7b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.junit.After;
 import org.junit.Before;
@@ -75,7 +76,7 @@ public class TestAddBlockgroup {
     final Path file1 = new Path("/file1");
     DFSTestUtil.createFile(fs, file1, BLOCKSIZE * 2, REPLICATION, 0L);
     INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
-    BlockInfoContiguous[] file1Blocks = file1Node.getBlocks();
+    BlockInfo[] file1Blocks = file1Node.getBlocks();
     assertEquals(2, file1Blocks.length);
     assertEquals(GROUP_SIZE, file1Blocks[0].numNodes());
     assertEquals(HdfsConstants.MAX_BLOCKS_IN_GROUP,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
index 1fbe160..f372bec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.TestFileCreation;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -91,12 +91,12 @@ public class TestBlockUnderConstruction {
         " isUnderConstruction = " + inode.isUnderConstruction() +
         " expected to be " + isFileOpen,
         inode.isUnderConstruction() == isFileOpen);
-    BlockInfoContiguous[] blocks = inode.getBlocks();
+    BlockInfo[] blocks = inode.getBlocks();
     assertTrue("File does not have blocks: " + inode.toString(),
         blocks != null && blocks.length > 0);
 
     int idx = 0;
-    BlockInfoContiguous curBlock;
+    BlockInfo curBlock;
     // all blocks but the last two should be regular blocks
     for(; idx < blocks.length - 2; idx++) {
       curBlock = blocks[idx];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 7b9ea93..913e0a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -24,6 +24,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.EnumSet;
 
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
@@ -105,7 +105,7 @@ public class TestFSImage {
     INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
     assertEquals("hello".length(), file2Node.computeFileSize());
     assertTrue(file2Node.isUnderConstruction());
-    BlockInfoContiguous[] blks = file2Node.getBlocks();
+    BlockInfo[] blks = file2Node.getBlocks();
     assertEquals(1, blks.length);
     assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
     // check lease manager

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index fbcc73f..3e27107 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -1036,7 +1037,8 @@ public class TestFileTruncate {
     iip = fsn.getFSDirectory().getINodesInPath(src, true);
     file = iip.getLastINode().asFile();
     file.recordModification(iip.getLatestSnapshotId(), true);
-    assertThat(file.isBlockInLatestSnapshot(file.getLastBlock()), is(true));
+    assertThat(file.isBlockInLatestSnapshot(
+        (BlockInfoContiguous) file.getLastBlock()), is(true));
     initialGenStamp = file.getLastBlock().getGenerationStamp();
     // Test that prepareFileForTruncate sets up copy-on-write truncate
     fsn.writeLock();
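
Because getLastBlock() now returns the BlockInfo supertype, call sites that need contiguous-only behavior, like the truncate test above, must narrow the result; the cast there is unguarded because that test only ever creates contiguous files. A defensive variant of the same narrowing, using stand-in classes rather than the HDFS ones, would look like:

    // Illustrative only: narrowing a widened BlockInfo result safely.
    // These classes are stand-ins, not the HDFS implementations.
    class BlockInfo { }
    class BlockInfoContiguous extends BlockInfo { }

    class NarrowingExample {
      static void useLastBlock(BlockInfo last) {
        if (last instanceof BlockInfoContiguous) {
          BlockInfoContiguous contiguous = (BlockInfoContiguous) last;
          // contiguous-only handling goes here
        }
      }
    }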
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 70deb1b..cddc457 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -77,7 +77,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -801,7 +801,7 @@ public class TestFsck {
       // intentionally corrupt NN data structure
       INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
           (fileName, true);
-      final BlockInfoContiguous[] blocks = node.getBlocks();
+      final BlockInfo[] blocks = node.getBlocks();
       assertEquals(blocks.length, 1);
       blocks[0].setNumBytes(-1L); // set the block length to be negative

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
index 85072d1..7bffb33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -108,14 +108,14 @@ public class TestSnapshotBlocksMap {
       final FSDirectory dir, final BlockManager blkManager) throws Exception {
     final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
     assertEquals(numBlocks, file.getBlocks().length);
-    for(BlockInfoContiguous b : file.getBlocks()) {
+    for(BlockInfo b : file.getBlocks()) {
       assertBlockCollection(blkManager, file, b);
     }
     return file;
   }
 
   static void assertBlockCollection(final BlockManager blkManager,
-      final INodeFile file, final BlockInfoContiguous b) {
+      final INodeFile file, final BlockInfo b) {
     Assert.assertSame(b, blkManager.getStoredBlock(b));
     Assert.assertSame(file, blkManager.getBlockCollection(b));
     Assert.assertSame(file, b.getBlockCollection());
@@ -146,10 +146,10 @@ public class TestSnapshotBlocksMap {
     {
       final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir,
           blockmanager);
-      BlockInfoContiguous[] blocks = f2.getBlocks();
+      BlockInfo[] blocks = f2.getBlocks();
       hdfs.delete(sub2, true);
       // The INode should have been removed from the blocksMap
-      for(BlockInfoContiguous b : blocks) {
+      for(BlockInfo b : blocks) {
         assertNull(blockmanager.getBlockCollection(b));
       }
     }
@@ -177,7 +177,7 @@ public class TestSnapshotBlocksMap {
     // Check the block information for file0
     final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir,
         blockmanager);
-    BlockInfoContiguous[] blocks0 = f0.getBlocks();
+    BlockInfo[] blocks0 = f0.getBlocks();
 
     // Also check the block information for snapshot of file0
     Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",
@@ -187,7 +187,7 @@ public class TestSnapshotBlocksMap {
     // Delete file0
     hdfs.delete(file0, true);
     // Make sure the blocks of file0 is still in blocksMap
-    for(BlockInfoContiguous b : blocks0) {
+    for(BlockInfo b : blocks0) {
       assertNotNull(blockmanager.getBlockCollection(b));
     }
     assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
@@ -201,7 +201,7 @@ public class TestSnapshotBlocksMap {
     hdfs.deleteSnapshot(sub1, "s1");
 
     // Make sure the first block of file0 is still in blocksMap
-    for(BlockInfoContiguous b : blocks0) {
+    for(BlockInfo b : blocks0) {
       assertNotNull(blockmanager.getBlockCollection(b));
     }
     assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
@@ -293,7 +293,7 @@ public class TestSnapshotBlocksMap {
       hdfs.append(bar);
 
       INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-      BlockInfoContiguous[] blks = barNode.getBlocks();
+      BlockInfo[] blks = barNode.getBlocks();
       assertEquals(1, blks.length);
       assertEquals(BLOCKSIZE, blks[0].getNumBytes());
       ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
@@ -331,7 +331,7 @@ public class TestSnapshotBlocksMap {
       hdfs.append(bar);
 
       INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-      BlockInfoContiguous[] blks = barNode.getBlocks();
+      BlockInfo[] blks = barNode.getBlocks();
       assertEquals(1, blks.length);
       ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
       cluster.getNameNodeRpc()
@@ -370,7 +370,7 @@ public class TestSnapshotBlocksMap {
       hdfs.append(bar);
 
       INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-      BlockInfoContiguous[] blks = barNode.getBlocks();
+      BlockInfo[] blks = barNode.getBlocks();
       assertEquals(1, blks.length);
       ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
       cluster.getNameNodeRpc()
@@ -421,7 +421,7 @@ public class TestSnapshotBlocksMap {
       out.write(testData);
       out.close();
       INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
-      BlockInfoContiguous[] blks = barNode.getBlocks();
+      BlockInfo[] blks = barNode.getBlocks();
       assertEquals(1, blks.length);
       assertEquals(testData.length, blks[0].getNumBytes());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7527a599/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index a679183..452ff3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -260,12 +260,12 @@ public class TestSnapshotDeletion {
     DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
     final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(
         tempFile.toString(), 1, fsdir, blockmanager);
-    BlockInfoContiguous[] blocks = temp.getBlocks();
+    BlockInfo[] blocks = temp.getBlocks();
     hdfs.delete(tempDir, true);
     // check dir's quota usage
     checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
     // check blocks of tempFile
-    for (BlockInfoContiguous b : blocks) {
+    for (BlockInfo b : blocks) {
       assertNull(blockmanager.getBlockCollection(b));
     }
 
@@ -342,7 +342,7 @@ public class TestSnapshotDeletion {
     // while deletion, we add diff for subsub and metaChangeFile1, and remove
     // newFile
     checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
-    for (BlockInfoContiguous b : blocks) {
+    for (BlockInfo b : blocks) {
       assertNull(blockmanager.getBlockCollection(b));
     }
 
@@ -479,7 +479,7 @@ public class TestSnapshotDeletion {
     final INodeFile toDeleteFileNode = TestSnapshotBlocksMap
         .assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
-    BlockInfoContiguous[] blocks = toDeleteFileNode.getBlocks();
+    BlockInfo[] blocks = toDeleteFileNode.getBlocks();
 
     // create snapshot s0 on dir
     SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
@@ -505,7 +505,7 @@ public class TestSnapshotDeletion {
     // metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and
     // metaChangeFile's replication factor decreases
     checkQuotaUsageComputation(dir, 6, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
-    for (BlockInfoContiguous b : blocks) {
+    for (BlockInfo b : blocks) {
       assertNull(blockmanager.getBlockCollection(b));
     }
 
@@ -799,7 +799,7 @@ public class TestSnapshotDeletion {
     FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
     INodeFile file14Node = TestSnapshotBlocksMap.assertBlockCollection(
         file14_s2.toString(), 1, fsdir, blockmanager);
-    BlockInfoContiguous[] blocks_14 = file14Node.getBlocks();
+    BlockInfo[] blocks_14 = file14Node.getBlocks();
     TestSnapshotBlocksMap.assertBlockCollection(file15_s2.toString(), 1, fsdir,
         blockmanager);
 
@@ -836,7 +836,7 @@ public class TestSnapshotDeletion {
         modDirStr + "file15");
     assertFalse(hdfs.exists(file14_s1));
     assertFalse(hdfs.exists(file15_s1));
-    for (BlockInfoContiguous b : blocks_14) {
+    for (BlockInfo b : blocks_14) {
      assertNull(blockmanager.getBlockCollection(b));
    }
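
The snapshot tests above repeatedly assert one three-way invariant through the widened assertBlockCollection() helper: a live block is present in the blocks map, the map resolves it back to its owning file, and the block's own back-pointer agrees. Widening the helper to the BlockInfo supertype lets the same assertions eventually cover striped blocks too. A compact, self-contained sketch of that invariant, with stand-in types rather than the HDFS BlockManager:

    // Sketch of the invariant checked by assertBlockCollection();
    // types here are stand-ins for the HDFS classes.
    import java.util.IdentityHashMap;
    import java.util.Map;

    public class BlocksMapInvariant {
      static class Block {
        Object blockCollection;  // back-pointer to the owning file
      }

      static void assertBlockCollection(Map<Block, Object> blocksMap,
          Object file, Block b) {
        // 1) the block is registered, 2) it maps to its file,
        // 3) the back-pointer agrees -- deletion must break all three at once
        assert blocksMap.containsKey(b);
        assert blocksMap.get(b) == file;
        assert b.blockCollection == file;
      }

      public static void main(String[] args) {
        Map<Block, Object> blocksMap = new IdentityHashMap<>();
        Object file = new Object();
        Block b = new Block();
        b.blockCollection = file;
        blocksMap.put(b, file);
        assertBlockCollection(blocksMap, file, b);  // run with -ea
      }
    }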