http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index 7708ddc..ed54aeb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;
@@ -191,8 +191,8 @@ public class TestReplicationPolicyWithNodeGroup {
 private static void setupDataNodeCapacity() {
 for(int i=0; i<NUM_OF_DATANODES; i++) {
 updateHeartbeatWithUsage(dataNodes[i],
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
 }
 }
@@ -274,8 +274,8 @@
 @Test
 public void testChooseTarget1() throws Exception {
 updateHeartbeatWithUsage(dataNodes[0],
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
 0L, 0L, 4, 0); // overloaded
 DatanodeStorageInfo[] targets;
@@ -312,8 +312,8 @@
 verifyNoTwoTargetsOnSameNodeGroup(targets);
 updateHeartbeatWithUsage(dataNodes[0],
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
 }
 private void verifyNoTwoTargetsOnSameNodeGroup(DatanodeStorageInfo[] targets) {
@@ -380,8 +380,8 @@
 public void testChooseTarget3() throws Exception {
 // make data node 0 to be not qualified to choose
 updateHeartbeatWithUsage(dataNodes[0],
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
 0L, 0L, 0, 0); // no space
 DatanodeStorageInfo[] targets;
@@ -412,8 +412,8 @@
 isOnSameRack(targets[2], targets[3]));
 updateHeartbeatWithUsage(dataNodes[0],
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
 }
 /**
@@ -430,8 +430,8 @@ public class TestReplicationPolicyWithNodeGroup {
 // make data node 0-2 to be not qualified to choose: not enough disk space
 for(int i=0; i<3; i++) {
 updateHeartbeatWithUsage(dataNodes[i],
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
 }
 DatanodeStorageInfo[] targets;
@@ -661,13 +661,13 @@ public class TestReplicationPolicyWithNodeGroup {
 }
 for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
 updateHeartbeatWithUsage(dataNodes[0],
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE,
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE,
 0L, 0L, 0L, 0, 0);
 updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
 }
 DatanodeStorageInfo[] targets;
@@ -697,8 +697,8 @@ public class TestReplicationPolicyWithNodeGroup {
 public void testRereplicateOnBoundaryTopology() throws Exception {
 for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
 updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
 }
 List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
 chosenNodes.add(storagesInBoundaryCase[0]);
@@ -735,8 +735,8 @@ public class TestReplicationPolicyWithNodeGroup {
 for(int i=0; i<NUM_OF_DATANODES_MORE_TARGETS; i++) {
 updateHeartbeatWithUsage(dataNodesInMoreTargetsCase[i],
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
 }
 DatanodeStorageInfo[] targets;
@@ -786,8 +786,8 @@ public class TestReplicationPolicyWithNodeGroup {
 //Update heartbeat
 for(int i=0; i<NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
 updateHeartbeatWithUsage(dataNodesForDependencies[i],
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
- 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+ 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
 }
 List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
index 3c16743..216ff3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
@@ -29,8 +29,8 @@ import java.net.InetSocketAddress;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -67,7 +67,7 @@ public class TestDatanodeRegister {
 // Return a a good software version.
 doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
 // Return a good layout version for now.
- doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
+ doReturn(HdfsServerConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
 .getLayoutVersion();
 DatanodeProtocolClientSideTranslatorPB fakeDnProt =
@@ -104,12 +104,12 @@ public class TestDatanodeRegister {
 @Test
 public void testDifferentLayoutVersions() throws Exception {
 // We expect no exceptions to be thrown when the layout versions match.
- assertEquals(HdfsConstants.NAMENODE_LAYOUT_VERSION,
+ assertEquals(HdfsServerConstants.NAMENODE_LAYOUT_VERSION,
 actor.retrieveNamespaceInfo().getLayoutVersion());
 // We expect an exception to be thrown when the NN reports a layout version
 // different from that of the DN.
- doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo)
+ doReturn(HdfsServerConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo)
 .getLayoutVersion();
 try {
 actor.retrieveNamespaceInfo();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index fa7a838..150b5e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
@@ -405,7 +405,7 @@ public class TestDirectoryScanner {
 // Test2: block metafile is missing
 long blockId = deleteMetaFile();
 scan(totalBlocks, 1, 1, 0, 0, 1);
- verifyGenStamp(blockId, HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP);
+ verifyGenStamp(blockId, HdfsConstants.GRANDFATHER_GENERATION_STAMP);
 scan(totalBlocks, 0, 0, 0, 0, 0);
 // Test3: block file is missing
@@ -420,7 +420,7 @@ public class TestDirectoryScanner {
 blockId = createBlockFile();
 totalBlocks++;
 scan(totalBlocks, 1, 1, 0, 1, 0);
- verifyAddition(blockId, HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP, 0);
+ verifyAddition(blockId, HdfsConstants.GRANDFATHER_GENERATION_STAMP, 0);
 scan(totalBlocks, 0, 0, 0, 0, 0);
 // Test5: A metafile exists for which there is no block file and

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 7eaf5c7..a5d5848 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -54,6 +53,7 @@ import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -98,9 +98,9 @@ public class TestStorageMover {
 DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
 DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
- HOT = DEFAULT_POLICIES.getPolicy(HdfsConstants.HOT_STORAGE_POLICY_NAME);
- WARM = DEFAULT_POLICIES.getPolicy(HdfsConstants.WARM_STORAGE_POLICY_NAME);
- COLD = DEFAULT_POLICIES.getPolicy(HdfsConstants.COLD_STORAGE_POLICY_NAME);
+ HOT = DEFAULT_POLICIES.getPolicy(HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
+ WARM = DEFAULT_POLICIES.getPolicy(HdfsServerConstants.WARM_STORAGE_POLICY_NAME);
+ COLD = DEFAULT_POLICIES.getPolicy(HdfsServerConstants.COLD_STORAGE_POLICY_NAME);
 TestBalancer.initTestSetup();
 Dispatcher.setDelayAfterErrors(1000L);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 360261d..db0185d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
@@ -596,7 +595,7 @@ public class NNThroughputBenchmark implements Tool {
 long end = Time.now();
 for(boolean written = !closeUponCreate; !written;
 written = nameNodeProto.complete(fileNames[daemonId][inputIdx],
- clientName, null, HdfsConstantsClient.GRANDFATHER_INODE_ID));
+ clientName, null, HdfsConstants.GRANDFATHER_INODE_ID));
 return end-start;
 }
@@ -1142,7 +1141,7 @@ public class NNThroughputBenchmark implements Tool {
 new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE,
 CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE, null);
 ExtendedBlock lastBlock = addBlocks(fileName, clientName);
- nameNodeProto.complete(fileName, clientName, lastBlock, HdfsConstantsClient.GRANDFATHER_INODE_ID);
+ nameNodeProto.complete(fileName, clientName, lastBlock, HdfsConstants.GRANDFATHER_INODE_ID);
 }
 // prepare block reports
 for(int idx=0; idx < nrDatanodes; idx++) {
@@ -1155,7 +1154,7 @@ public class NNThroughputBenchmark implements Tool {
 ExtendedBlock prevBlock = null;
 for(int jdx = 0; jdx < blocksPerFile; jdx++) {
 LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
- prevBlock, null, HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+ prevBlock, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
 prevBlock = loc.getBlock();
 for(DatanodeInfo dnInfo : loc.getLocations()) {
 int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index 2d29a68..5a4134c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
@@ -89,14 +89,14 @@ public class TestAddBlockRetry {
 LOG.info("Starting first addBlock for " + src);
 LocatedBlock[] onRetryBlock = new LocatedBlock[1];
 DatanodeStorageInfo targets[] = ns.getNewBlockTargets(
- src, HdfsConstantsClient.GRANDFATHER_INODE_ID, "clientName",
+ src, HdfsConstants.GRANDFATHER_INODE_ID, "clientName",
 null, null, null, onRetryBlock);
 assertNotNull("Targets must be generated", targets);
 // run second addBlock()
 LOG.info("Starting second addBlock for " + src);
 nn.addBlock(src, "clientName", null, null,
- HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+ HdfsConstants.GRANDFATHER_INODE_ID, null);
 assertTrue("Penultimate block must be complete", checkFileProgress(src, false));
 LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
@@ -106,7 +106,7 @@ public class TestAddBlockRetry {
 // continue first addBlock()
 LocatedBlock newBlock = ns.storeAllocatedBlock(
- src, HdfsConstantsClient.GRANDFATHER_INODE_ID, "clientName", null, targets);
+ src, HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
 assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
 // check locations
@@ -144,14 +144,14 @@ public class TestAddBlockRetry {
 // start first addBlock()
 LOG.info("Starting first addBlock for " + src);
 LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
- HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+ HdfsConstants.GRANDFATHER_INODE_ID, null);
 assertTrue("Block locations should be present", lb1.getLocations().length > 0);
 cluster.restartNameNode();
 nameNodeRpc = cluster.getNameNodeRpc();
 LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
- HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+ HdfsConstants.GRANDFATHER_INODE_ID, null);
 assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
 assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 6d8d205..1e42e34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -68,9 +68,9 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -880,12 +880,12 @@ public class TestEditLog {
 @Override
 public long getFirstTxId() {
- return HdfsConstants.INVALID_TXID;
+ return HdfsServerConstants.INVALID_TXID;
 }
 @Override
 public long getLastTxId() {
- return HdfsConstants.INVALID_TXID;
+ return HdfsServerConstants.INVALID_TXID;
 }
 @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
index c3d2997..c0eb890 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
@@ -29,7 +29,7 @@ import java.net.HttpURLConnection;
 import java.net.URL;
 import java.util.EnumMap;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.junit.Test;
@@ -51,7 +51,7 @@ public class TestEditLogFileInputStream {
 URL url = new URL("http://localhost/fakeLog");
 EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
- HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
+ HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID, false);
 // Read the edit log and verify that we got all of the data.
 EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
 .countEditLogOpTypes(elis);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 833ef95..bc55d12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -365,7 +365,7 @@ public class TestFSEditLogLoader {
 truncateFile(logFile, txOffset);
 validation = EditLogFileInputStream.validateEditLog(logFile);
 long expectedEndTxId = (txId == 0) ?
- HdfsConstants.INVALID_TXID : (txId - 1);
+ HdfsServerConstants.INVALID_TXID : (txId - 1);
 assertEquals("Failed when corrupting txid " + txId + " txn opcode " + "at " + txOffset, expectedEndTxId, validation.getEndTxId());
 assertTrue(!validation.hasCorruptHeader());
@@ -383,7 +383,7 @@ public class TestFSEditLogLoader {
 EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
 assertTrue(!validation.hasCorruptHeader());
- assertEquals(HdfsConstants.INVALID_TXID, validation.getEndTxId());
+ assertEquals(HdfsServerConstants.INVALID_TXID, validation.getEndTxId());
 }
 private static final Map<Byte, FSEditLogOpCodes> byteToEnum =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
index d11b783..9903906 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -430,7 +430,7 @@ public class TestFSPermissionChecker {
 PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group, FsPermission.createImmutable(perm));
 INodeDirectory inodeDirectory = new INodeDirectory(
- HdfsConstantsClient.GRANDFATHER_INODE_ID, name.getBytes("UTF-8"), permStatus, 0L);
+ HdfsConstants.GRANDFATHER_INODE_ID, name.getBytes("UTF-8"), permStatus, 0L);
 parent.addChild(inodeDirectory);
 return inodeDirectory;
 }
@@ -439,7 +439,7 @@ public class TestFSPermissionChecker {
 String owner, String group, short perm) throws IOException {
 PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group, FsPermission.createImmutable(perm));
- INodeFile inodeFile = new INodeFile(HdfsConstantsClient.GRANDFATHER_INODE_ID,
+ INodeFile inodeFile = new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
 name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION, PREFERRED_BLOCK_SIZE);
 parent.addChild(inodeFile);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index fbcc73f..bd19a5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -650,8 +649,8 @@ public class TestFileTruncate {
 checkBlockRecovery(p);
 NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
- .setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
- HdfsConstants.LEASE_HARDLIMIT_PERIOD);
+ .setLeasePeriod(HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD,
+ HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
 checkFullFile(p, newLength, contents);
 fs.delete(p, false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 4e6c59a..b45d2f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -86,12 +86,12 @@ public class TestINodeFile {
 private long preferredBlockSize = 1024;
 INodeFile createINodeFile(short replication, long preferredBlockSize) {
- return new INodeFile(HdfsConstantsClient.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
+ return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
 null, replication, preferredBlockSize);
 }
 private static INodeFile createINodeFile(byte storagePolicyID) {
- return new INodeFile(HdfsConstantsClient.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
+ return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
 null, (short)3, 1024L, storagePolicyID);
 }
@@ -194,9 +194,9 @@ public class TestINodeFile {
 INodeFile inf = createINodeFile(replication, preferredBlockSize);
 inf.setLocalName(DFSUtil.string2Bytes("f"));
- INodeDirectory root = new INodeDirectory(HdfsConstantsClient.GRANDFATHER_INODE_ID,
+ INodeDirectory root = new INodeDirectory(HdfsConstants.GRANDFATHER_INODE_ID,
 INodeDirectory.ROOT_NAME, perm, 0L);
- INodeDirectory dir = new INodeDirectory(HdfsConstantsClient.GRANDFATHER_INODE_ID,
+ INodeDirectory dir = new INodeDirectory(HdfsConstants.GRANDFATHER_INODE_ID,
 DFSUtil.string2Bytes("d"), perm, 0L);
 assertEquals("f", inf.getFullPathName());
@@ -345,7 +345,7 @@ public class TestINodeFile {
 {//cast from INodeFileUnderConstruction
 final INode from = new INodeFile(
- HdfsConstantsClient.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication,
+ HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication,
 1024L);
 from.asFile().toUnderConstruction("client", "machine");
@@ -363,7 +363,7 @@ public class TestINodeFile {
 }
 {//cast from INodeDirectory
- final INode from = new INodeDirectory(HdfsConstantsClient.GRANDFATHER_INODE_ID, null,
+ final INode from = new INodeDirectory(HdfsConstants.GRANDFATHER_INODE_ID, null,
 perm, 0L);
 //cast to INodeFile, should fail
@@ -1108,7 +1108,7 @@ public class TestINodeFile {
 @Test
 public void testFileUnderConstruction() {
 replication = 3;
- final INodeFile file = new INodeFile(HdfsConstantsClient.GRANDFATHER_INODE_ID, null,
+ final INodeFile file = new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null,
 perm, 0L, 0L, null, replication, 1024L);
 assertFalse(file.isUnderConstruction());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
index 03c7557..4e4ed0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
@@ -22,8 +22,8 @@ import static org.junit.Assert.assertTrue;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.After;
 import org.junit.Test;
@@ -79,7 +79,7 @@ public class TestMetadataVersionOutput {
assertExceptionContains("ExitException", e); } /* Check if meta data version is printed correctly. */ - final String verNumStr = HdfsConstants.NAMENODE_LAYOUT_VERSION + ""; + final String verNumStr = HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ""; assertTrue(baos.toString("UTF-8"). contains("HDFS Image Version: " + verNumStr)); assertTrue(baos.toString("UTF-8"). http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java index a3582ce..7ee49a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java @@ -22,7 +22,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.junit.Assert; @@ -70,11 +70,11 @@ public class TestNameNodeOptionParsing { opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved"}); assertEquals(StartupOption.UPGRADE, opt); assertEquals( - ".snapshot." + HdfsConstants.NAMENODE_LAYOUT_VERSION + ".snapshot." + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ".UPGRADE_RENAMED", FSImageFormat.renameReservedMap.get(".snapshot")); assertEquals( - ".reserved." + HdfsConstants.NAMENODE_LAYOUT_VERSION + ".reserved." 
 + ".UPGRADE_RENAMED",
 FSImageFormat.renameReservedMap.get(".reserved"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
index a4cb97f..49d01c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.junit.After;
 import org.junit.Assert;
@@ -68,7 +68,7 @@ public class TestTruncateQuotaUpdate {
 dfs.mkdirs(dir);
 dfs.setQuota(dir, Long.MAX_VALUE - 1, DISKQUOTA);
 dfs.setQuotaByStorageType(dir, StorageType.DISK, DISKQUOTA);
- dfs.setStoragePolicy(dir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
+ dfs.setStoragePolicy(dir, HdfsServerConstants.HOT_STORAGE_POLICY_NAME);
 }
 @After

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
index a77b435..cb7cb35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java
@@ -38,10 +38,10 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder;
 import org.apache.hadoop.hdfs.qjournal.server.Journal;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -304,7 +304,7 @@ public class TestDFSUpgradeWithHA {
 BestEffortLongFile committedTxnId = (BestEffortLongFile) Whitebox
 .getInternalState(journal1, "committedTxnId");
 return committedTxnId != null ? committedTxnId.get() :
- HdfsConstants.INVALID_TXID;
+ HdfsServerConstants.INVALID_TXID;
 }
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index 29c6b10..f0c5482 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -196,7 +196,7 @@ public class TestOpenFilesWithSnapshot {
 String clientName = fs.getClient().getClientName();
 // create one empty block
 nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
- HdfsConstantsClient.GRANDFATHER_INODE_ID, null);
+ HdfsConstants.GRANDFATHER_INODE_ID, null);
 fs.createSnapshot(path, "s2");
 fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index b20e2ad..abceea4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -57,7 +57,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index a5d5087..391f190 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.util.Time;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -65,7 +65,7 @@ public class TestJsonUtil {
 final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
 now, now + 10, new FsPermission((short) 0644), "user", "group",
 DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
- HdfsConstantsClient.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
+ HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
 final FileStatus fstatus = toFileStatus(status, parent);
 System.out.println("status = " + status);
 System.out.println("fstatus = " + fstatus);
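Every hunk above applies the same substitution: server-side-only constants move from org.apache.hadoop.hdfs.protocol.HdfsConstants to org.apache.hadoop.hdfs.server.common.HdfsServerConstants, while the client-visible "grandfather" constants the tests previously read from HdfsConstantsClient are now read from HdfsConstants. A minimal sketch of how the relocated constants are referenced after this change; the class, field, and block-size values below are hypothetical scaffolding for illustration only, not part of the patch, and require a hadoop-hdfs build that contains this commit:

```java
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

// Hypothetical illustration class; not part of the commit.
public class ConstantRelocationSketch {
  // Hypothetical block size, mirroring the arithmetic in the test hunks above.
  private static final long BLOCK_SIZE = 1024L;

  public static void main(String[] args) {
    // Server-side constants are now read from HdfsServerConstants.
    long capacity = 2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE;
    int layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION;

    // Client-visible legacy constants are now read from HdfsConstants
    // (previously HdfsConstantsClient in these tests).
    long inodeId = HdfsConstants.GRANDFATHER_INODE_ID;

    System.out.println(capacity + " " + layoutVersion + " " + inodeId);
  }
}
```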
