Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1598435&r1=1598434&r2=1598435&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Thu May 29 22:27:25 2014
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.web.WebHdf
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.security.TestDoAsEffectiveUser;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
 import org.junit.AfterClass;
@@ -88,7 +89,7 @@ public class TestDelegationTokenForProxy
     builder.append("127.0.1.1,");
     builder.append(InetAddress.getLocalHost().getCanonicalHostName());
     LOG.info("Local Ip addresses: " + builder.toString());
-    conf.setStrings(ProxyUsers.getProxySuperuserIpConfKey(superUserShortName),
+    conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(superUserShortName),
         builder.toString());
   }
@@ -100,7 +101,7 @@ public class TestDelegationTokenForProxy
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
-    config.setStrings(ProxyUsers.getProxySuperuserGroupConfKey(REAL_USER),
+    config.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER),
        "group1");
     config.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
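Note: the two hunks above swap the conf-key helpers' home class from ProxyUsers to DefaultImpersonationProvider; the keys they build are unchanged (the hadoop.proxyuser.<user>.groups and hadoop.proxyuser.<user>.hosts settings). A minimal usage sketch of the relocated helpers; the superuser name and values here are illustrative, not from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
    import org.apache.hadoop.security.authorize.ProxyUsers;

    public class ProxyConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Let "super" impersonate members of group1, but only from 127.0.0.1.
        // The helpers expand to keys such as hadoop.proxyuser.super.groups.
        conf.set(DefaultImpersonationProvider
            .getProxySuperuserGroupConfKey("super"), "group1");
        conf.set(DefaultImpersonationProvider
            .getProxySuperuserIpConfKey("super"), "127.0.0.1");
        // Re-read the proxy-user settings so the new keys take effect.
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
      }
    }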
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1598435&r1=1598434&r2=1598435&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Thu May 29 22:27:25 2014
@@ -30,6 +30,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyServers;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
@@ -284,8 +285,8 @@ public class TestJspHelper {
     String user = "TheNurse";
     conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-    conf.set(ProxyUsers.getProxySuperuserGroupConfKey(realUser), "*");
-    conf.set(ProxyUsers.getProxySuperuserIpConfKey(realUser), "*");
+    conf.set(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(realUser), "*");
+    conf.set(DefaultImpersonationProvider.getProxySuperuserIpConfKey(realUser), "*");
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
     UserGroupInformation.setConfiguration(conf);
     UserGroupInformation ugi;

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1598435&r1=1598434&r2=1598435&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Thu May 29 22:27:25 2014
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -215,6 +216,6 @@ public class TestDiskError {
     dataNode.checkDiskError();
     Thread.sleep(dataNode.checkDiskErrorInterval);
     long lastDiskErrorCheck = dataNode.getLastDiskErrorCheck();
-    assertTrue("Disk Error check is not performed within " + dataNode.checkDiskErrorInterval + " ms", ((System.currentTimeMillis()-lastDiskErrorCheck) < (dataNode.checkDiskErrorInterval + slackTime)));
+    assertTrue("Disk Error check is not performed within " + dataNode.checkDiskErrorInterval + " ms", ((Time.monotonicNow()-lastDiskErrorCheck) < (dataNode.checkDiskErrorInterval + slackTime)));
   }
 }
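Note: the TestDiskError assertion rewrite matters because getLastDiskErrorCheck() is presumably recorded with the same monotonic clock in this change set; subtracting it from System.currentTimeMillis() would mix two unrelated time bases, and wall-clock time can also jump under NTP adjustment. A small sketch of the elapsed-time idiom (only Time.monotonicNow() is from the patch; the sleep is a stand-in):

    import org.apache.hadoop.util.Time;

    public class ElapsedSketch {
      public static void main(String[] args) throws InterruptedException {
        // Time.monotonicNow() is derived from System.nanoTime(), so it never
        // moves backwards even if the system clock is stepped.
        long start = Time.monotonicNow();
        Thread.sleep(50);                      // stand-in for real work
        long elapsedMs = Time.monotonicNow() - start;
        System.out.println("elapsed ~" + elapsedMs + " ms");
      }
    }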
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java?rev=1598435&r1=1598434&r2=1598435&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java Thu May 29 22:27:25 2014
@@ -39,8 +39,8 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.authorize.ProxyServers;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.junit.Before;
 import org.junit.Test;

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java?rev=1598435&r1=1598434&r2=1598435&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java Thu May 29 22:27:25 2014
@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.net.BindException;
+import java.util.Random;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,6 +48,9 @@ public class TestEditLogAutoroll {
   private NameNode nn0;
   private FileSystem fs;
   private FSEditLog editLog;
+  private final Random random = new Random();
+
+  private static final Log LOG = LogFactory.getLog(TestEditLog.class);
 
   @Before
   public void setUp() throws Exception {
@@ -54,24 +62,35 @@ public class TestEditLogAutoroll {
     conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
     conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);
 
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
-
-    cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(0)
-      .build();
-    cluster.waitActive();
-
-    nn0 = cluster.getNameNode(0);
-    fs = HATestUtil.configureFailoverFs(cluster, conf);
-
-    cluster.transitionToActive(0);
-
-    fs = cluster.getFileSystem(0);
-    editLog = nn0.getNamesystem().getEditLog();
+    int retryCount = 0;
+    while (true) {
+      try {
+        int basePort = 10060 + random.nextInt(100) * 2;
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
+
+        cluster = new MiniDFSCluster.Builder(conf)
+          .nnTopology(topology)
+          .numDataNodes(0)
+          .build();
+        cluster.waitActive();
+
+        nn0 = cluster.getNameNode(0);
+        fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+        cluster.transitionToActive(0);
+
+        fs = cluster.getFileSystem(0);
+        editLog = nn0.getNamesystem().getEditLog();
+        ++retryCount;
+        break;
+      } catch (BindException e) {
+        LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
+            + retryCount + " times");
+      }
+    }
   }
 
   @After
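Note: the setUp() rewrite above (mirrored in TestStandbyCheckpoints further down) drops the fixed HTTP ports 10061/10062 in favor of a randomized even base port plus a retry on BindException, so test JVMs running in parallel on one build host rarely collide. A stripped-down sketch of the pattern; startCluster() and the printed message are placeholders, and unlike the committed loop this sketch increments the counter in the catch block so the logged count reflects failed attempts:

    import java.net.BindException;
    import java.util.Random;

    public class PortRetrySketch {
      public static void main(String[] args) throws Exception {
        Random random = new Random();
        int retryCount = 0;
        while (true) {
          // Even base in [10060, 10258]: nn1 gets basePort, nn2 basePort + 1.
          int basePort = 10060 + random.nextInt(100) * 2;
          try {
            startCluster(basePort);  // placeholder for the MiniDFSCluster setup
            break;                   // success: keep this cluster and move on
          } catch (BindException e) {
            retryCount++;
            System.out.println("Port " + basePort + " busy, retry " + retryCount);
          }
        }
      }

      private static void startCluster(int basePort) throws BindException {
        // Placeholder: the real code builds a MiniDFSNNTopology on
        // basePort/basePort + 1 and starts a MiniDFSCluster on it.
      }
    }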
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java?rev=1598435&r1=1598434&r2=1598435&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java Thu May 29 22:27:25 2014
@@ -104,7 +104,7 @@ public class TestFailureToReadEdits {
     HAUtil.setAllowStandbyReads(conf, true);
 
     if (clusterType == TestType.SHARED_DIR_HA) {
-      MiniDFSNNTopology topology = MiniQJMHACluster.createDefaultTopology();
+      MiniDFSNNTopology topology = MiniQJMHACluster.createDefaultTopology(10000);
       cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)
         .numDataNodes(0)
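Note: here the fix is simpler, because MiniQJMHACluster.createDefaultTopology() gained an int overload (presumably added elsewhere in this change set) that pins the topology to an explicit base port instead of whatever fixed default the no-arg form used. A usage sketch, assuming only the overload visible in the hunk above:

    import org.apache.hadoop.hdfs.MiniDFSNNTopology;
    import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;

    public class QjmTopologySketch {
      public static void main(String[] args) {
        // 10000 is the base port used by the patch; any free range works.
        MiniDFSNNTopology topology = MiniQJMHACluster.createDefaultTopology(10000);
        System.out.println(topology);
      }
    }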
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java?rev=1598435&r1=1598434&r2=1598435&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java Thu May 29 22:27:25 2014
@@ -25,12 +25,14 @@ import static org.junit.Assert.fail;
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.net.BindException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
 import java.net.URI;
 import java.net.URL;
 import java.util.List;
+import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -73,6 +75,7 @@ public class TestStandbyCheckpoints {
   protected MiniDFSCluster cluster;
   protected NameNode nn0, nn1;
   protected FileSystem fs;
+  private final Random random = new Random();
   protected File tmpOivImgDir;
 
   private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class);
@@ -87,22 +90,33 @@ public class TestStandbyCheckpoints {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
 
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
-
-    cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(0)
-      .build();
-    cluster.waitActive();
-
-    nn0 = cluster.getNameNode(0);
-    nn1 = cluster.getNameNode(1);
-    fs = HATestUtil.configureFailoverFs(cluster, conf);
-
-    cluster.transitionToActive(0);
+    int retryCount = 0;
+    while (true) {
+      try {
+        int basePort = 10060 + random.nextInt(100) * 2;
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
+
+        cluster = new MiniDFSCluster.Builder(conf)
+          .nnTopology(topology)
+          .numDataNodes(0)
+          .build();
+        cluster.waitActive();
+
+        nn0 = cluster.getNameNode(0);
+        nn1 = cluster.getNameNode(1);
+        fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+        cluster.transitionToActive(0);
+        ++retryCount;
+        break;
+      } catch (BindException e) {
+        LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
+            + retryCount + " times");
+      }
+    }
   }
 
   protected Configuration setupCommonConfig() {

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java?rev=1598435&r1=1598434&r2=1598435&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java Thu May 29 22:27:25 2014
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.junit.After;
 import org.junit.Before;
@@ -150,8 +151,8 @@ public class TestRefreshUserMappings {
     final String [] GROUP_NAMES2 = new String [] {"gr3" , "gr4"};
 
     //keys in conf
-    String userKeyGroups = ProxyUsers.getProxySuperuserGroupConfKey(SUPER_USER);
-    String userKeyHosts = ProxyUsers.getProxySuperuserIpConfKey (SUPER_USER);
+    String userKeyGroups = DefaultImpersonationProvider.getProxySuperuserGroupConfKey(SUPER_USER);
+    String userKeyHosts = DefaultImpersonationProvider.getProxySuperuserIpConfKey (SUPER_USER);
     config.set(userKeyGroups, "gr3,gr4,gr5"); // superuser can proxy for this group
     config.set(userKeyHosts,"127.0.0.1");
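Note: TestRefreshUserMappings exercises the runtime-refresh path: after the hadoop.proxyuser keys change, an admin pushes them to the live NameNode instead of restarting it. A sketch using DFSAdmin, which the test already imports; the Configuration wiring (fs.defaultFS pointing at the NameNode) is assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;

    public class RefreshSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same effect as: hdfs dfsadmin -refreshSuperUserGroupsConfiguration
        int ret = new DFSAdmin(conf).run(
            new String[] { "-refreshSuperUserGroupsConfiguration" });
        System.exit(ret);
      }
    }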