http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index cf0325e..409967e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -511,7 +510,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    */
   public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) {
     final byte storagePolicyId = isSymlink() ?
-        HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID();
+        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID();
     return computeQuotaUsage(bsps, storagePolicyId,
         new QuotaCounts.Builder().build(), true, Snapshot.CURRENT_STATE_ID);
   }
@@ -555,7 +554,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   public final QuotaCounts computeQuotaUsage(
       BlockStoragePolicySuite bsps, QuotaCounts counts, boolean useCache) {
     final byte storagePolicyId = isSymlink() ?
-        HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID();
+        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID();
     return computeQuotaUsage(bsps, storagePolicyId, counts, useCache,
         Snapshot.CURRENT_STATE_ID);
   }
@@ -712,7 +711,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   /**
    * @return the storage policy directly specified on the INode. Return
-   * {@link HdfsConstantsClient#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED} if no policy has
+   * {@link HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED} if no policy has
    * been specified.
    */
   public abstract byte getLocalStoragePolicyID();
@@ -721,13 +720,13 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * Get the storage policy ID while computing quota usage
    * @param parentStoragePolicyId the storage policy ID of the parent directory
    * @return the storage policy ID of this INode. Note that for an
-   * {@link INodeSymlink} we return {@link HdfsConstantsClient#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED}
+   * {@link INodeSymlink} we return {@link HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED}
    * instead of throwing Exception
    */
   public byte getStoragePolicyIDForQuota(byte parentStoragePolicyId) {
     byte localId = isSymlink() ?
-        HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getLocalStoragePolicyID();
-    return localId != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ?
+        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getLocalStoragePolicyID();
+    return localId != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ?
         localId : parentStoragePolicyId;
   }
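The hunks above only repoint references: BLOCK_STORAGE_POLICY_ID_UNSPECIFIED now lives on HdfsConstants instead of HdfsConstantsClient. The resolution rule in getStoragePolicyIDForQuota reduces to the sketch below (the standalone helper and its parameters are illustrative; it assumes the post-commit constant location):

    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    class StoragePolicyResolution {
      // Symlinks never carry a policy; an unspecified local policy falls
      // back to the parent directory's policy, mirroring the hunk above.
      static byte resolveForQuota(boolean isSymlink, byte localId,
          byte parentId) {
        byte effective = isSymlink
            ? HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : localId;
        return effective != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED
            ? effective : parentId;
      }
    }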
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 12fa7aa..098594d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -45,7 +45,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
 /**
  * Directory INode class.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index e9d3644..110bd71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
index 5344ca7..00b33cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.FileNotFoundException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.util.SequentialNumber;
 
 /**
@@ -44,7 +44,7 @@ public class INodeId extends SequentialNumber {
    */
   public static void checkId(long requestId, INode inode)
       throws FileNotFoundException {
-    if (requestId != HdfsConstantsClient.GRANDFATHER_INODE_ID && requestId != inode.getId()) {
+    if (requestId != HdfsConstants.GRANDFATHER_INODE_ID && requestId != inode.getId()) {
       throw new FileNotFoundException(
           "ID mismatch. Request id and saved id: " + requestId + " , "
           + inode.getId() + " for file " + inode.getFullPathName());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
index 9a1e1f4..7b1332b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
@@ -22,7 +22,7 @@ import java.util.List;
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.LightWeightGSet;
@@ -124,12 +124,12 @@ public class INodeMap {
 
       @Override
       public byte getStoragePolicyID(){
-        return HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+        return HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
       }
 
       @Override
       public byte getLocalStoragePolicyID() {
-        return HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+        return HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
       }
     };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index f1892c5..72ca6ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -49,7 +50,7 @@ public class INodesInPath {
    */
   private static boolean isDotSnapshotDir(byte[] pathComponent) {
     return pathComponent != null &&
-        Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent);
+        Arrays.equals(HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent);
   }
 
   static INodesInPath fromINode(INode inode) {
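The INodeId.checkId hunk above is behavior-preserving; the check whose constant it relocates is sketched below (free-standing method with plain parameters, an illustration assuming GRANDFATHER_INODE_ID on HdfsConstants after this commit):

    import java.io.FileNotFoundException;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    class InodeIdCheck {
      // GRANDFATHER_INODE_ID is the "no inode id supplied" sentinel used by
      // older clients, so it bypasses the mismatch check entirely.
      static void checkId(long requestId, long savedId, String path)
          throws FileNotFoundException {
        if (requestId != HdfsConstants.GRANDFATHER_INODE_ID
            && requestId != savedId) {
          throw new FileNotFoundException("ID mismatch. Request id and saved"
              + " id: " + requestId + " , " + savedId + " for file " + path);
        }
      }
    }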
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 9ce8ebc..c6a92be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -36,7 +36,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.util.Daemon;
@@ -72,8 +71,8 @@ public class LeaseManager {
 
   private final FSNamesystem fsnamesystem;
 
-  private long softLimit = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
-  private long hardLimit = HdfsConstants.LEASE_HARDLIMIT_PERIOD;
+  private long softLimit = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD;
+  private long hardLimit = HdfsServerConstants.LEASE_HARDLIMIT_PERIOD;
 
   //
   // Used for handling lock-leases

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index dbb2c50..26a13bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
@@ -129,7 +128,7 @@ public class NNStorage extends Storage implements Closeable,
    * recent fsimage file. This does not include any transactions
    * that have since been written to the edit log.
    */
-  protected volatile long mostRecentCheckpointTxId = HdfsConstants.INVALID_TXID;
+  protected volatile long mostRecentCheckpointTxId = HdfsServerConstants.INVALID_TXID;
 
   /**
    * Time of the last checkpoint, in milliseconds since the epoch.
@@ -558,7 +557,7 @@ public class NNStorage extends Storage implements Closeable,
    */
   public void format(NamespaceInfo nsInfo) throws IOException {
     Preconditions.checkArgument(nsInfo.getLayoutVersion() == 0 ||
-        nsInfo.getLayoutVersion() == HdfsConstants.NAMENODE_LAYOUT_VERSION,
+        nsInfo.getLayoutVersion() == HdfsServerConstants.NAMENODE_LAYOUT_VERSION,
         "Bad layout version: %s", nsInfo.getLayoutVersion());
 
     this.setStorageInfo(nsInfo);
@@ -577,7 +576,7 @@ public class NNStorage extends Storage implements Closeable,
   }
 
   public void format() throws IOException {
-    this.layoutVersion = HdfsConstants.NAMENODE_LAYOUT_VERSION;
+    this.layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
     for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
@@ -634,7 +633,7 @@ public class NNStorage extends Storage implements Closeable,
             "storage directory " + sd.getRoot().getAbsolutePath());
       }
       props.setProperty("layoutVersion",
-          Integer.toString(HdfsConstants.NAMENODE_LAYOUT_VERSION));
+          Integer.toString(HdfsServerConstants.NAMENODE_LAYOUT_VERSION));
     }
     setFieldsFromProperties(props, sd);
   }
@@ -657,7 +656,7 @@ public class NNStorage extends Storage implements Closeable,
    * This should only be used during upgrades.
    */
   String getDeprecatedProperty(String prop) {
-    assert getLayoutVersion() > HdfsConstants.NAMENODE_LAYOUT_VERSION :
+    assert getLayoutVersion() > HdfsServerConstants.NAMENODE_LAYOUT_VERSION :
       "getDeprecatedProperty should only be done when loading " +
       "storage from past versions during upgrade.";
     return deprecatedProperties.get(prop);
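The LeaseManager fields above now take their defaults from HdfsServerConstants. A minimal sketch of how the two thresholds differ (helper methods are illustrative): past the soft limit another client may force lease recovery; past the hard limit the NameNode reclaims the lease on its own.

    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

    class LeaseLimits {
      // Both periods are in milliseconds, measured from the last renewal.
      static boolean softLimitExpired(long lastRenewedMs, long nowMs) {
        return nowMs - lastRenewedMs > HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD;
      }
      static boolean hardLimitExpired(long lastRenewedMs, long nowMs) {
        return nowMs - lastRenewedMs > HdfsServerConstants.LEASE_HARDLIMIT_PERIOD;
      }
    }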
"" : (":"+port); - return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + namenode.getHostName()+portString); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 83686e0..3311609 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -21,8 +21,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_DE import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY; -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_DEPTH; -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_PATH_LENGTH; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_DEPTH; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_LENGTH; import static org.apache.hadoop.util.Time.now; import java.io.FileNotFoundException; @@ -87,7 +87,6 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.FSLimitException; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; @@ -117,6 +116,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; @@ -1500,9 +1500,9 @@ class NameNodeRpcServer implements NamenodeProtocols { * @throws IOException on layout version mismatch */ void verifyLayoutVersion(int version) throws IOException { - if (version != HdfsConstants.NAMENODE_LAYOUT_VERSION) + if (version != HdfsServerConstants.NAMENODE_LAYOUT_VERSION) throw new IncorrectVersionException( - HdfsConstants.NAMENODE_LAYOUT_VERSION, version, "data node"); + HdfsServerConstants.NAMENODE_LAYOUT_VERSION, version, "data node"); } private void verifySoftwareVersion(DatanodeRegistration dnReg) http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java 
---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java index 674a957..33be8b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java @@ -23,7 +23,7 @@ import java.util.Collection; import java.util.Comparator; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; import com.google.common.base.Preconditions; @@ -88,8 +88,8 @@ class RedundantEditLogInputStream extends EditLogInputStream { RedundantEditLogInputStream(Collection<EditLogInputStream> streams, long startTxId) { this.curIdx = 0; - this.prevTxId = (startTxId == HdfsConstants.INVALID_TXID) ? - HdfsConstants.INVALID_TXID : (startTxId - 1); + this.prevTxId = (startTxId == HdfsServerConstants.INVALID_TXID) ? + HdfsServerConstants.INVALID_TXID : (startTxId - 1); this.state = (streams.isEmpty()) ? State.EOF : State.SKIP_UNTIL; this.prevException = null; // EditLogInputStreams in a RedundantEditLogInputStream must be finalized, @@ -97,9 +97,9 @@ class RedundantEditLogInputStream extends EditLogInputStream { EditLogInputStream first = null; for (EditLogInputStream s : streams) { Preconditions.checkArgument(s.getFirstTxId() != - HdfsConstants.INVALID_TXID, "invalid first txid in stream: %s", s); + HdfsServerConstants.INVALID_TXID, "invalid first txid in stream: %s", s); Preconditions.checkArgument(s.getLastTxId() != - HdfsConstants.INVALID_TXID, "invalid last txid in stream: %s", s); + HdfsServerConstants.INVALID_TXID, "invalid last txid in stream: %s", s); if (first == null) { first = s; } else { @@ -172,7 +172,7 @@ class RedundantEditLogInputStream extends EditLogInputStream { switch (state) { case SKIP_UNTIL: try { - if (prevTxId != HdfsConstants.INVALID_TXID) { + if (prevTxId != HdfsServerConstants.INVALID_TXID) { LOG.info("Fast-forwarding stream '" + streams[curIdx].getName() + "' to transaction ID " + (prevTxId + 1)); streams[curIdx].skipUntil(prevTxId + 1); http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java index 0d32758..041c3cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java @@ -44,7 +44,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import 
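The RedundantEditLogInputStream constructor above keeps the same sentinel arithmetic, only sourcing INVALID_TXID from HdfsServerConstants; in isolation it is just:

    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

    class TxIdInit {
      // "Position unknown" stays unknown; otherwise the previous txid is the
      // one just before the requested start, as in the constructor above.
      static long initialPrevTxId(long startTxId) {
        return startTxId == HdfsServerConstants.INVALID_TXID
            ? HdfsServerConstants.INVALID_TXID : startTxId - 1;
      }
    }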
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index 0d32758..041c3cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
@@ -336,7 +336,7 @@ public class TransferFsImage {
   private static void copyFileToStream(OutputStream out, File localfile,
       FileInputStream infile, DataTransferThrottler throttler,
       Canceler canceler) throws IOException {
-    byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
+    byte buf[] = new byte[HdfsServerConstants.IO_FILE_BUFFER_SIZE];
     try {
       CheckpointFaultInjector.getInstance()
           .aboutToSendFile(localfile);
@@ -345,7 +345,7 @@ public class TransferFsImage {
           shouldSendShortFile(localfile)) {
           // Test sending image shorter than localfile
           long len = localfile.length();
-          buf = new byte[(int)Math.min(len/2, HdfsConstants.IO_FILE_BUFFER_SIZE)];
+          buf = new byte[(int)Math.min(len/2, HdfsServerConstants.IO_FILE_BUFFER_SIZE)];
           // This will read at most half of the image
           // and the rest of the image will be sent over the wire
           infile.read(buf);
@@ -510,7 +510,7 @@ public class TransferFsImage {
     }
 
     int num = 1;
-    byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
+    byte[] buf = new byte[HdfsServerConstants.IO_FILE_BUFFER_SIZE];
     while (num > 0) {
       num = stream.read(buf);
       if (num > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index 9d8f2f8..0accf53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -167,7 +167,7 @@ public class BootstrapStandby implements Tool, Configurable {
     if (!checkLayoutVersion(nsInfo)) {
       LOG.fatal("Layout version on remote node (" + nsInfo.getLayoutVersion()
           + ") does not match " + "this node's layout version ("
-          + HdfsConstants.NAMENODE_LAYOUT_VERSION + ")");
+          + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ")");
       return ERR_CODE_INVALID_VERSION;
     }
 
@@ -366,7 +366,7 @@ public class BootstrapStandby implements Tool, Configurable {
   }
 
   private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
-    return (nsInfo.getLayoutVersion() == HdfsConstants.NAMENODE_LAYOUT_VERSION);
+    return (nsInfo.getLayoutVersion() == HdfsServerConstants.NAMENODE_LAYOUT_VERSION);
   }
 
   private void parseConfAndFindOtherNN() throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 1897d8d..38aa358 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -31,9 +31,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputException;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
@@ -75,12 +75,12 @@ public class EditLogTailer {
   /**
    * The last transaction ID at which an edit log roll was initiated.
    */
-  private long lastRollTriggerTxId = HdfsConstants.INVALID_TXID;
+  private long lastRollTriggerTxId = HdfsServerConstants.INVALID_TXID;
 
   /**
    * The highest transaction ID loaded by the Standby.
    */
-  private long lastLoadedTxnId = HdfsConstants.INVALID_TXID;
+  private long lastLoadedTxnId = HdfsServerConstants.INVALID_TXID;
 
   /**
    * The last time we successfully loaded a non-zero number of edits from the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index 7baf7dc..c4406a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -21,7 +21,7 @@ import java.util.List;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.AclFeature;
@@ -151,7 +151,7 @@ public class FileWithSnapshotFeature implements INode.Feature {
     BlockStoragePolicy bsp = null;
     EnumCounters<StorageType> typeSpaces =
         new EnumCounters<StorageType>(StorageType.class);
-    if (storagePolicyID != HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
+    if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
       bsp = bsps.getPolicy(file.getStoragePolicyID());
     }
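The TransferFsImage hunks earlier in this block swap the buffer-size constant without touching the copy loop, whose shape reduces to the following (a sketch; the fault injection, throttling, and short-file test paths are omitted):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

    class ImageCopy {
      // Fixed-size buffer copy, sized by the relocated constant.
      static void copy(InputStream in, OutputStream out) throws IOException {
        byte[] buf = new byte[HdfsServerConstants.IO_FILE_BUFFER_SIZE];
        int n;
        while ((n = in.read(buf)) > 0) {
          out.write(buf, 0, n);
        }
      }
    }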
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
index faaf8f4..d06d0db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
@@ -20,14 +20,14 @@ package org.apache.hadoop.hdfs.server.protocol;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class NNHAStatusHeartbeat {
 
   private final HAServiceState state;
-  private long txid = HdfsConstants.INVALID_TXID;
+  private long txid = HdfsServerConstants.INVALID_TXID;
 
   public NNHAStatusHeartbeat(HAServiceState state, long txid) {
     this.state = state;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
index a7439a0..dfdf449 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
@@ -83,7 +83,7 @@ public class NamespaceInfo extends StorageInfo {
   public NamespaceInfo(int nsID, String clusterID, String bpID,
       long cT, String buildVersion, String softwareVersion,
       long capabilities) {
-    super(HdfsConstants.NAMENODE_LAYOUT_VERSION, nsID, clusterID, cT,
+    super(HdfsServerConstants.NAMENODE_LAYOUT_VERSION, nsID, clusterID, cT,
         NodeType.NAME_NODE);
     blockPoolID = bpID;
     this.buildVersion = buildVersion;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
index 4b191f2..1d26bc4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
@@ -17,14 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-
 import com.google.common.base.Function;
 import com.google.common.collect.ComparisonChain;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 
 public class RemoteEditLog implements Comparable<RemoteEditLog> {
-  private long startTxId = HdfsConstants.INVALID_TXID;
-  private long endTxId = HdfsConstants.INVALID_TXID;
+  private long startTxId = HdfsServerConstants.INVALID_TXID;
+  private long endTxId = HdfsServerConstants.INVALID_TXID;
   private boolean isInProgress = false;
 
   public RemoteEditLog() {
@@ -33,7 +32,7 @@ public class RemoteEditLog implements Comparable<RemoteEditLog> {
   public RemoteEditLog(long startTxId, long endTxId) {
     this.startTxId = startTxId;
     this.endTxId = endTxId;
-    this.isInProgress = (endTxId == HdfsConstants.INVALID_TXID);
+    this.isInProgress = (endTxId == HdfsServerConstants.INVALID_TXID);
   }
 
   public RemoteEditLog(long startTxId, long endTxId, boolean inProgress) {
@@ -90,7 +89,7 @@ public class RemoteEditLog implements Comparable<RemoteEditLog> {
     @Override
     public Long apply(RemoteEditLog log) {
       if (null == log) {
-        return HdfsConstants.INVALID_TXID;
+        return HdfsServerConstants.INVALID_TXID;
       }
       return log.getStartTxId();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index ba1fd0f..e3bdffa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.StringUtils;
@@ -151,7 +151,7 @@ public class StoragePolicyAdmin extends Configured implements Tool {
         return 2;
       }
       byte storagePolicyId = status.getStoragePolicy();
-      if (storagePolicyId == HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
+      if (storagePolicyId == HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
         System.out.println("The storage policy of " + path + " is unspecified");
         return 0;
       }
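The StoragePolicyAdmin hunk above keeps the same "unspecified means inherited" test; as a free-standing predicate (illustrative helper, assuming the post-commit constant location):

    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    class PolicyQuery {
      // A path carrying the unspecified id has no policy of its own and
      // inherits one from its ancestors.
      static boolean hasExplicitPolicy(HdfsFileStatus status) {
        return status.getStoragePolicy()
            != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
      }
    }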
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
index 0ce1e78..73d1798 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
@@ -22,8 +22,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 
@@ -49,8 +48,8 @@ interface OfflineEditsLoader {
       OfflineEditsLoader loader = null;
       try {
         file = new File(inputFileName);
-        elis = new EditLogFileInputStream(file, HdfsConstants.INVALID_TXID,
-            HdfsConstants.INVALID_TXID, false);
+        elis = new EditLogFileInputStream(file, HdfsServerConstants.INVALID_TXID,
+            HdfsServerConstants.INVALID_TXID, false);
         loader = new OfflineEditsBinaryLoader(visitor, elis, flags);
       } finally {
         if ((loader == null) && (elis != null)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
index 3693239..f2c7427 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
@@ -28,7 +28,7 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -687,7 +687,7 @@ class ImageLoaderCurrent implements ImageLoader {
       final String pathName = readINodePath(in, parentName);
       v.visit(ImageElement.INODE_PATH, pathName);
 
-      long inodeId = HdfsConstantsClient.GRANDFATHER_INODE_ID;
+      long inodeId = HdfsConstants.GRANDFATHER_INODE_ID;
       if (supportInodeId) {
         inodeId = in.readLong();
         v.visit(ImageElement.INODE_ID, inodeId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
index c98ba66..6d7ef55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -259,7 +260,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
   public void testCreateLinkMaxPathLink() throws IOException {
     Path dir = new Path(testBaseDir1());
     Path file = new Path(testBaseDir1(), "file");
-    final int maxPathLen = HdfsConstants.MAX_PATH_LENGTH;
+    final int maxPathLen = HdfsServerConstants.MAX_PATH_LENGTH;
     final int dirLen = dir.toString().length() + 1;
     int len = maxPathLen - dirLen;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 681e9bd..a8df991 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -113,6 +113,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -950,7 +951,7 @@ public class DFSTestUtil {
     final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
         NetUtils.getOutputStream(s, writeTimeout),
-        HdfsConstants.SMALL_BUFFER_SIZE));
+        HdfsServerConstants.SMALL_BUFFER_SIZE));
     final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));
 
     // send the request
@@ -1222,7 +1223,7 @@ public class DFSTestUtil {
     s2.close();
     // OP_SET_STORAGE_POLICY 45
     filesystem.setStoragePolicy(pathFileCreate,
-        HdfsConstants.HOT_STORAGE_POLICY_NAME);
+        HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
     // OP_RENAME_OLD 1
     final Path pathFileMoved = new Path("/file_moved");
     filesystem.rename(pathFileCreate, pathFileMoved);
@@ -1689,8 +1690,7 @@ public class DFSTestUtil {
     modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
     field.setInt(null, lv);
 
-    // Override {@link HdfsConstants#DATANODE_LAYOUT_VERSION}
-    field = HdfsConstants.class.getField("DATANODE_LAYOUT_VERSION");
+    field = HdfsServerConstants.class.getField("DATANODE_LAYOUT_VERSION");
     field.setAccessible(true);
     modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
     field.setInt(null, lv);
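After this commit the named policies used by the tests (HOT, WARM, COLD, and so on) are read from HdfsServerConstants; the call shape used throughout DFSTestUtil and TestBlockStoragePolicy below is simply (a sketch with an illustrative path; fs is assumed to be an open DistributedFileSystem):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

    class PolicyByName {
      // Only the constant's home changed; the FileSystem call is the same.
      static void markCold(DistributedFileSystem fs) throws IOException {
        fs.setStoragePolicy(new Path("/archive"),
            HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
      }
    }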
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 89c8e11..9621dc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.*;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -67,12 +68,12 @@ public class TestBlockStoragePolicy {
   static final long FILE_LEN = 1024;
   static final short REPLICATION = 3;
 
-  static final byte COLD = HdfsConstants.COLD_STORAGE_POLICY_ID;
-  static final byte WARM = HdfsConstants.WARM_STORAGE_POLICY_ID;
-  static final byte HOT = HdfsConstants.HOT_STORAGE_POLICY_ID;
-  static final byte ONESSD = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
-  static final byte ALLSSD = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
-  static final byte LAZY_PERSIST = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
+  static final byte COLD = HdfsServerConstants.COLD_STORAGE_POLICY_ID;
+  static final byte WARM = HdfsServerConstants.WARM_STORAGE_POLICY_ID;
+  static final byte HOT = HdfsServerConstants.HOT_STORAGE_POLICY_ID;
+  static final byte ONESSD = HdfsServerConstants.ONESSD_STORAGE_POLICY_ID;
+  static final byte ALLSSD = HdfsServerConstants.ALLSSD_STORAGE_POLICY_ID;
+  static final byte LAZY_PERSIST = HdfsServerConstants.MEMORY_STORAGE_POLICY_ID;
 
   @Test (timeout=300000)
   public void testConfigKeyEnabled() throws IOException {
@@ -83,7 +84,7 @@ public class TestBlockStoragePolicy {
     try {
       cluster.waitActive();
       cluster.getFileSystem().setStoragePolicy(new Path("/"),
-          HdfsConstants.COLD_STORAGE_POLICY_NAME);
+          HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
     } finally {
       cluster.shutdown();
     }
@@ -103,7 +104,7 @@ public class TestBlockStoragePolicy {
     try {
       cluster.waitActive();
       cluster.getFileSystem().setStoragePolicy(new Path("/"),
-          HdfsConstants.COLD_STORAGE_POLICY_NAME);
+          HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
     } finally {
       cluster.shutdown();
     }
@@ -860,15 +861,15 @@ public class TestBlockStoragePolicy {
 
     final Path invalidPath = new Path("/invalidPath");
     try {
-      fs.setStoragePolicy(invalidPath, HdfsConstants.WARM_STORAGE_POLICY_NAME);
+      fs.setStoragePolicy(invalidPath, HdfsServerConstants.WARM_STORAGE_POLICY_NAME);
       Assert.fail("Should throw a FileNotFoundException");
     } catch (FileNotFoundException e) {
       GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
     }
 
-    fs.setStoragePolicy(fooFile, HdfsConstants.COLD_STORAGE_POLICY_NAME);
-    fs.setStoragePolicy(barDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
-    fs.setStoragePolicy(barFile2, HdfsConstants.HOT_STORAGE_POLICY_NAME);
+    fs.setStoragePolicy(fooFile, HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
+    fs.setStoragePolicy(barDir, HdfsServerConstants.WARM_STORAGE_POLICY_NAME);
+    fs.setStoragePolicy(barFile2, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
 
     dirList = fs.getClient().listPaths(dir.toString(),
         HdfsFileStatus.EMPTY_NAME).getPartialListing();
@@ -916,7 +917,7 @@ public class TestBlockStoragePolicy {
     DFSTestUtil.createFile(fs, fooFile1, FILE_LEN, REPLICATION, 0L);
     DFSTestUtil.createFile(fs, fooFile2, FILE_LEN, REPLICATION, 0L);
 
-    fs.setStoragePolicy(fooDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
+    fs.setStoragePolicy(fooDir, HdfsServerConstants.WARM_STORAGE_POLICY_NAME);
 
     HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
         HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
@@ -928,7 +929,7 @@ public class TestBlockStoragePolicy {
     // take snapshot
     SnapshotTestHelper.createSnapshot(fs, dir, "s1");
     // change the storage policy of fooFile1
-    fs.setStoragePolicy(fooFile1, HdfsConstants.COLD_STORAGE_POLICY_NAME);
+    fs.setStoragePolicy(fooFile1, HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
 
     fooList = fs.getClient().listPaths(fooDir.toString(),
         HdfsFileStatus.EMPTY_NAME).getPartialListing();
@@ -951,7 +952,7 @@ public class TestBlockStoragePolicy {
         HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD);
 
     // change the storage policy of foo dir
-    fs.setStoragePolicy(fooDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
+    fs.setStoragePolicy(fooDir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
     // /dir/foo is now hot
     dirList = fs.getClient().listPaths(dir.toString(),
         HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
@@ -1068,7 +1069,7 @@ public class TestBlockStoragePolicy {
    */
   @Test
   public void testChangeHotFileRep() throws Exception {
-    testChangeFileRep(HdfsConstants.HOT_STORAGE_POLICY_NAME, HOT,
+    testChangeFileRep(HdfsServerConstants.HOT_STORAGE_POLICY_NAME, HOT,
        new StorageType[]{StorageType.DISK, StorageType.DISK,
            StorageType.DISK},
        new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK,
@@ -1082,7 +1083,7 @@ public class TestBlockStoragePolicy {
    */
   @Test
   public void testChangeWarmRep() throws Exception {
-    testChangeFileRep(HdfsConstants.WARM_STORAGE_POLICY_NAME, WARM,
+    testChangeFileRep(HdfsServerConstants.WARM_STORAGE_POLICY_NAME, WARM,
        new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
            StorageType.ARCHIVE},
        new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
@@ -1095,7 +1096,7 @@ public class TestBlockStoragePolicy {
    */
   @Test
   public void testChangeColdRep() throws Exception {
-    testChangeFileRep(HdfsConstants.COLD_STORAGE_POLICY_NAME, COLD,
+    testChangeFileRep(HdfsServerConstants.COLD_STORAGE_POLICY_NAME, COLD,
        new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
            StorageType.ARCHIVE},
        new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
index 68687ed..3fe5626 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
@@ -30,7 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -191,7 +191,7 @@ public class TestDFSRollback {
           UpgradeUtilities.getCurrentBlockPoolID(cluster));
       // Put newer layout version in current.
       storageInfo = new StorageInfo(
-          HdfsConstants.DATANODE_LAYOUT_VERSION - 1,
+          HdfsServerConstants.DATANODE_LAYOUT_VERSION - 1,
           UpgradeUtilities.getCurrentNamespaceID(cluster),
           UpgradeUtilities.getCurrentClusterID(cluster),
           UpgradeUtilities.getCurrentFsscTime(cluster),
@@ -277,7 +277,7 @@ public class TestDFSRollback {
       UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
       baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,
          "previous");
-      storageInfo = new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,
+      storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION,
          UpgradeUtilities.getCurrentNamespaceID(cluster),
          UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE,
          NodeType.DATA_NODE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
index 889e579..3a27be6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
@@ -27,7 +27,7 @@ import java.io.File;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -89,7 +89,7 @@ public class TestDFSStartupVersions {
    */
   private StorageData[] initializeVersions() throws Exception {
     int layoutVersionOld = Storage.LAST_UPGRADABLE_LAYOUT_VERSION;
-    int layoutVersionCur = HdfsConstants.DATANODE_LAYOUT_VERSION;
+    int layoutVersionCur = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
     int layoutVersionNew = Integer.MIN_VALUE;
     int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID(null);
     int namespaceIdOld = Integer.MIN_VALUE;
@@ -200,7 +200,7 @@ public class TestDFSStartupVersions {
       return false;
     }
     // check #3
-    int softwareLV = HdfsConstants.DATANODE_LAYOUT_VERSION;
+    int softwareLV = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
     int storedLV = datanodeVer.getLayoutVersion();
     if (softwareLV == storedLV &&
         datanodeVer.getCTime() == namenodeVer.getCTime())
@@ -252,7 +252,7 @@ public class TestDFSStartupVersions {
         .startupOption(StartupOption.REGULAR)
         .build();
     StorageData nameNodeVersion = new StorageData(
-        HdfsConstants.NAMENODE_LAYOUT_VERSION,
+        HdfsServerConstants.NAMENODE_LAYOUT_VERSION,
        UpgradeUtilities.getCurrentNamespaceID(cluster),
        UpgradeUtilities.getCurrentClusterID(cluster),
        UpgradeUtilities.getCurrentFsscTime(cluster),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index f0a094e..544537c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -37,9 +37,9 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -307,7 +307,7 @@ public class TestDFSUpgrade {
     UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
     cluster = createCluster();
     baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
-    storageInfo = new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,
+    storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION,
        UpgradeUtilities.getCurrentNamespaceID(cluster),
        UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE,
        NodeType.DATA_NODE);
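The registration tests that follow mock DATANODE_LAYOUT_VERSION from its new home; the check they exercise is an exact equality, as in verifyLayoutVersion earlier in this commit (a sketch with an illustrative helper):

    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

    class LayoutCheck {
      // Registration is refused unless the node's layout version matches
      // the software's layout version exactly.
      static boolean layoutMatches(int reportedVersion) {
        return reportedVersion == HdfsServerConstants.DATANODE_LAYOUT_VERSION;
      }
    }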
         .getCTime();
     StorageInfo mockStorageInfo = mock(StorageInfo.class);
     doReturn(nnCTime).when(mockStorageInfo).getCTime();
-    doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo)
+    doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo)
         .getLayoutVersion();
     DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
         mockStorageInfo, null, VersionInfo.getVersion());
@@ -225,7 +224,7 @@
       doReturn(nnCTime).when(mockStorageInfo).getCTime();
 
       DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
-      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
+      doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
       doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
       doReturn(123).when(mockDnReg).getXferPort();
       doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
@@ -274,7 +273,7 @@
       doReturn(nnCTime).when(mockStorageInfo).getCTime();
 
       DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
-      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
+      doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
       doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
       doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index ca25018..969f2b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -33,15 +33,14 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -111,7 +110,7 @@ public class TestFileAppend4 {
 
     // set the soft limit to be 1 second so that the
     // namenode triggers lease recovery upon append request
-    cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD);
+    cluster.setLeasePeriod(1000, HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
 
     // Trying recovery
     int tries = 60;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 0a7b712..f56ff9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -70,7 +70,6 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -537,7 +536,7 @@ public class TestFileCreation {
 
       // add one block to the file
       LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
-          client.clientName, null, null, HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+          client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
       System.out.println("testFileCreationError2: "
           + "Added block " + location.getBlock());
@@ -588,7 +587,7 @@
     createFile(dfs, f, 3);
     try {
       cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName,
-          null, null, HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+          null, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
       fail();
     } catch(IOException ioe) {
       FileSystem.LOG.info("GOOD!", ioe);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index 8ff8655..103151e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -34,8 +34,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -278,7 +278,7 @@ public class TestGetBlocks {
 
     for (int i = 0; i < blkids.length; i++) {
       Block b = new Block(blkids[i], 0,
-          HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP);
+          HdfsConstants.GRANDFATHER_GENERATION_STAMP);
       Long v = map.get(b);
       System.out.println(b + " => " + v);
       assertEquals(blkids[i], v.longValue());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index 9b5a7c0..88dbd5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -18,13 +18,10 @@
 package org.apache.hadoop.hdfs;
 
 import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Matchers.anyList;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Matchers.anyShort;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.anyObject;
-import static org.mockito.Matchers.anyShort;
-import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
@@ -32,12 +29,10 @@ import static org.mockito.Mockito.spy;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -47,8 +42,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -103,7 +98,7 @@ public class TestLease {
       // make it look like the soft limit has been exceeded.
       LeaseRenewer originalRenewer = dfs.getLeaseRenewer();
       dfs.lastLeaseRenewal = Time.monotonicNow()
-          - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
+          - HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
       try {
         dfs.renewLease();
       } catch (IOException e) {}
@@ -119,7 +114,7 @@
       // make it look like the hard limit has been exceeded.
       dfs.lastLeaseRenewal = Time.monotonicNow()
-          - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
+          - HdfsServerConstants.LEASE_HARDLIMIT_PERIOD - 1000;
       dfs.renewLease();
 
       // this should not work.
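
The two TestLease hunks above capture the intent of the whole rename: the lease soft and hard limits are NameNode-side recovery policy rather than client API surface, so they now live in HdfsServerConstants. What the test does is backdate the client's last renewal timestamp far enough that each limit has expired. A minimal sketch of that backdating under the same constants (LeaseLimitSketch and backdatedBy are hypothetical names used here for illustration, not code from this patch):

    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
    import org.apache.hadoop.util.Time;

    public class LeaseLimitSketch {
      // A renewal older than now - LEASE_SOFTLIMIT_PERIOD lets a competing
      // client preempt the lease; older than now - LEASE_HARDLIMIT_PERIOD
      // lets the NameNode reclaim it outright, which is why the test expects
      // the original client's later renewLease() to fail.
      public static long backdatedBy(long limitMillis) {
        return Time.monotonicNow() - limitMillis - 1000;
      }

      public static void main(String[] args) {
        System.out.println("soft-expired renewal timestamp: "
            + backdatedBy(HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD));
        System.out.println("hard-expired renewal timestamp: "
            + backdatedBy(HdfsServerConstants.LEASE_HARDLIMIT_PERIOD));
      }
    }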
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 6d981fb..b8aac28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -333,8 +332,8 @@ public class TestLeaseRecovery2 {
     DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
 
     // Reset default lease periods
-    cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
-        HdfsConstants.LEASE_HARDLIMIT_PERIOD);
+    cluster.setLeasePeriod(HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD,
+        HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
     //create a file
     // create a random file name
     String filestr = "/foo" + AppendTestUtil.nextInt();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index e9891bf..9ac58ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -37,9 +37,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -533,7 +533,7 @@ public class UpgradeUtilities {
    * of the Namenode, whether it is running or not.
   */
  public static int getCurrentNameNodeLayoutVersion() {
-    return HdfsConstants.NAMENODE_LAYOUT_VERSION;
+    return HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
  }
 
  /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index 2e84499..2115671 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -34,11 +34,11 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.qjournal.QJMTestUtil;
 import org.apache.hadoop.hdfs.qjournal.client.IPCLoggerChannel;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -185,7 +185,7 @@ public class TestJournalNode {
     byte[] retrievedViaHttp = DFSTestUtil.urlGetBytes(new URL(urlRoot +
         "/getJournal?segmentTxId=1&jid=" + journalId));
     byte[] expected = Bytes.concat(
-            Ints.toByteArray(HdfsConstants.NAMENODE_LAYOUT_VERSION),
+            Ints.toByteArray(HdfsServerConstants.NAMENODE_LAYOUT_VERSION),
             (new byte[] { 0, 0, 0, 0 }), // layout flags section
             EDITS_DATA);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 91abb2a..9ce16f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -44,8 +44,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -88,7 +88,8 @@ public class TestBlockManager {
   @Before
   public void setupMockCluster() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "need to set a dummy value here so it assumes a multi-rack cluster");
+    conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
+        "need to set a dummy value here so it assumes a multi-rack cluster");
     fsn = Mockito.mock(FSNamesystem.class);
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     bm = new BlockManager(fsn, conf);
@@ -111,8 +112,8 @@
     for (DatanodeDescriptor dn : nodesToAdd) {
       cluster.add(dn);
       dn.getStorageInfos()[0].setUtilizationForTesting(
-          2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L);
+          2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L);
       dn.updateHeartbeat(
           BlockManagerTestUtil.getStorageReportsForDatanode(dn),
           0L, 0L, 0, 0, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 296003f..1d6dad8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -50,8 +50,8 @@ import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -138,8 +138,8 @@ public class TestReplicationPolicy {
     }
     for (int i=0; i < NUM_OF_DATANODES; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
   }
@@ -162,8 +162,8 @@
   @Test
   public void testChooseTarget1() throws Exception {
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
         0L, 0L, 4, 0); // overloaded
 
     DatanodeStorageInfo[] targets;
@@ -193,8 +193,8 @@
     assertFalse(isOnSameRack(targets[0], targets[2]));
 
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
   }
 
   private static DatanodeStorageInfo[] chooseTarget(int numOfReplicas) {
@@ -316,8 +316,8 @@ public void testChooseTarget3() throws Exception {
     // make data node 0 to be not qualified to choose
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
         0L, 0L, 0, 0); // no space
 
     DatanodeStorageInfo[] targets;
@@ -350,8 +350,8 @@
     assertFalse(isOnSameRack(targets[1], targets[3]));
 
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
   }
 
   /**
@@ -367,8 +367,8 @@
     // make data node 0 & 1 to be not qualified to choose: not enough disk space
     for(int i=0; i<2; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
 
     DatanodeStorageInfo[] targets;
@@ -395,8 +395,8 @@
 
     for(int i=0; i<2; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
   }
@@ -459,8 +459,8 @@
     bm.getDatanodeManager().getNetworkTopology().add(newDn);
     bm.getDatanodeManager().getHeartbeatManager().addDatanode(newDn);
     updateHeartbeatWithUsage(newDn,
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
 
     // Try picking three nodes. Only two should return.
     excludedNodes.clear();
@@ -506,8 +506,8 @@
     // make data node 0 & 1 to be not qualified to choose: not enough disk space
     for(int i=0; i<2; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
 
     final LogVerificationAppender appender = new LogVerificationAppender();
@@ -531,8 +531,8 @@
     for(int i=0; i<2; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
index d514768..7ff2930 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
@@ -34,8 +34,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -93,8 +93,8 @@ public class TestReplicationPolicyConsiderLoad {
       dnrList.add(dnr);
       dnManager.registerDatanode(dnr);
       dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L,
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L);
+          2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L,
+          2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L);
       dataNodes[i].updateHeartbeat(
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
           0L, 0L, 0, 0, null);
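
The block-management hunks above all lean on the same arithmetic: a DataNode storage provisioned with capacity and remaining space of 2 * MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE comfortably passes the default placement policy's free-space check, while (MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE deliberately leaves it one block short, since a candidate target needs roughly MIN_BLOCKS_FOR_WRITE blocks' worth of free space (the real check also subtracts space already scheduled for pending writes). A sketch of that threshold, where hasEnoughSpaceForWrite is a hypothetical stand-in for the policy's internal test and blockSize = 1024 is an assumed value mirroring the tests' BLOCK_SIZE:

    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

    public class WriteSpaceSketch {
      // Approximation of the "enough space" check a candidate target must
      // pass: at least MIN_BLOCKS_FOR_WRITE blocks' worth of free space.
      static boolean hasEnoughSpaceForWrite(long remainingBytes, long blockSize) {
        return remainingBytes >= (long) HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize;
      }

      public static void main(String[] args) {
        long blockSize = 1024;  // assumed; mirrors BLOCK_SIZE in the tests
        // Provisioned like the tests' healthy nodes: passes.
        System.out.println(hasEnoughSpaceForWrite(
            2L * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize, blockSize));
        // Provisioned one block short, like the "no space" nodes: fails.
        System.out.println(hasEnoughSpaceForWrite(
            (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE - 1L) * blockSize, blockSize));
      }
    }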

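Similarly, the TestJournalNode hunk earlier in this section shows why NAMENODE_LAYOUT_VERSION belongs with the server-side constants: the bytes a JournalNode serves from its /getJournal servlet begin with the NameNode's layout version, which only server code can know. A restatement of the expectation the test builds with Guava's Ints and Bytes (EditLogHeaderSketch and expectedStream are hypothetical names; the header layout, an int layout version followed by a four-byte layout-flags section and then the edit records, is taken from the test itself):

    import com.google.common.primitives.Bytes;
    import com.google.common.primitives.Ints;
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

    public class EditLogHeaderSketch {
      // Serialized edit-log segment: big-endian layout version,
      // 4-byte layout flags (all zero here), then the edit records.
      public static byte[] expectedStream(byte[] editsData) {
        return Bytes.concat(
            Ints.toByteArray(HdfsServerConstants.NAMENODE_LAYOUT_VERSION),
            new byte[] { 0, 0, 0, 0 },  // layout flags section
            editsData);
      }
    }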