Repository: hadoop Updated Branches: refs/heads/HDFS-7285-merge d94493b69 -> 601ca5375 (forced update)
Fix minor issues when merging trunk (HDFS-6407) to HDFS-7285 branch. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/601ca537 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/601ca537 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/601ca537 Branch: refs/heads/HDFS-7285-merge Commit: 601ca5375986c6e98f74f292da282f86ce7d543c Parents: cc10933 Author: Zhe Zhang <zhezh...@cloudera.com> Authored: Thu Aug 20 11:33:55 2015 -0700 Committer: Zhe Zhang <zhezh...@cloudera.com> Committed: Thu Aug 20 14:16:36 2015 -0700 ---------------------------------------------------------------------- .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java | 1 - .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java | 4 +--- .../apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java | 2 +- .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 4 ++-- .../org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java | 5 ++--- .../apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java | 4 ++-- .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 4 ++-- .../org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java | 2 +- 8 files changed, 11 insertions(+), 15 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/601ca537/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index abc37c9..35c4f9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -44,7 
+44,6 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.ByteBufferReadable; http://git-wip-us.apache.org/repos/asf/hadoop/blob/601ca537/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 5d03fd2..00f3a65 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -24,8 +24,6 @@ import java.nio.channels.ClosedChannelException; import java.util.EnumSet; import java.util.concurrent.atomic.AtomicReference; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.crypto.CryptoProtocolVersion; @@ -358,7 +356,7 @@ public class DFSOutputStream extends FSOutputSummer String[] favoredNodes) throws IOException { TraceScope scope = dfsClient.getPathTraceScope("newStreamForAppend", src); - if(stat.getReplication() == 0) { + if(stat.getErasureCodingPolicy() != null) { throw new IOException("Not support appending to a striping layout file yet."); } try { http://git-wip-us.apache.org/repos/asf/hadoop/blob/601ca537/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java ---------------------------------------------------------------------- diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index 9387176..bf11914 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -29,7 +29,7 @@ import org.apache.hadoop.util.LightWeightGSet; * where the replicas of the block, or blocks belonging to the erasure coding * block group, are stored. */ -public abstract class BlockInfo extends Block +public abstract class BlockInfo extends Block implements LightWeightGSet.LinkedElement { public static final BlockInfo[] EMPTY_ARRAY = {}; http://git-wip-us.apache.org/repos/asf/hadoop/blob/601ca537/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index a64e50c..dfea5f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -892,8 +892,8 @@ public class BlockManager implements BlockStatsMXBean { } /** @return a LocatedBlock for the given block */ - private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos - ) throws IOException { + private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos) + throws IOException { if (!blk.isComplete()) { if (blk.isStriped()) { final BlockInfoStripedUnderConstruction uc = 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/601ca537/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java index c2c4155..a37947e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; -import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; @@ -214,6 +213,7 @@ final class FSDirTruncateOp { assert fsn.hasWriteLock(); INodeFile file = iip.getLastINode().asFile(); + assert !file.isStriped(); file.recordModification(iip.getLatestSnapshotId()); file.toUnderConstruction(leaseHolder, clientMachine); assert file.isUnderConstruction() : "inode should be under construction."; @@ -221,12 +221,11 @@ final class FSDirTruncateOp { file.getFileUnderConstructionFeature().getClientName(), file.getId()); boolean shouldRecoverNow = (newBlock == null); BlockInfo oldBlock = file.getLastBlock(); - assert !oldBlock.isStriped(); boolean shouldCopyOnTruncate = shouldCopyOnTruncate(fsn, file, oldBlock); if (newBlock == null) { newBlock = 
(shouldCopyOnTruncate) ? - fsn.createNewBlock(file.isStriped()) : new Block( + fsn.createNewBlock(false) : new Block( oldBlock.getBlockId(), oldBlock.getNumBytes(), fsn.nextGenerationStamp(fsn.getBlockIdManager().isLegacyBlock( oldBlock))); http://git-wip-us.apache.org/repos/asf/hadoop/blob/601ca537/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java index 086aa05..b9466f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java @@ -213,8 +213,8 @@ class FSDirWriteFileOp { DatanodeStorageInfo[] locs, long offset) throws IOException { LocatedBlock lBlk = BlockManager.newLocatedBlock(fsn.getExtendedBlock(blk), blk, locs, offset); - fsn.getFSDirectory().getBlockManager(). 
- setBlockToken(lBlk, BlockTokenIdentifier.AccessMode.WRITE); + fsn.getBlockManager().setBlockToken(lBlk, + BlockTokenIdentifier.AccessMode.WRITE); return lBlk; } http://git-wip-us.apache.org/repos/asf/hadoop/blob/601ca537/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 2eb9f2a..a27bf11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -3775,8 +3775,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, while (it.hasNext()) { Block b = it.next(); BlockInfo blockInfo = getStoredBlock(b); - if (blockInfo.getBlockCollection().getStoragePolicyID() == - lpPolicy.getId()) { + if (blockInfo.getBlockCollection().getStoragePolicyID() + == lpPolicy.getId()) { filesToDelete.add(blockInfo.getBlockCollection()); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/601ca537/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 46e2b98..c47a256 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -456,7 +456,7 @@ public class 
NamenodeFsck implements DataEncryptionKeyFactory { return; } - final Result r = file.getReplication() == 0? ecRes: replRes; + final Result r = file.getErasureCodingPolicy() == null ? ecRes: replRes; collectFileSummary(path, file, r, blocks); if (showprogress && (replRes.totalFiles + ecRes.totalFiles) % 100 == 0) { out.println();