Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 c2d3af39f -> 13414be1a
HDFS-9804. Allow long-running Balancer to login with keytab. Contributed by Xiao Chen.

(cherry picked from commit ccff6035f50769eb69701128ae61efc69e82609d)
(cherry picked from commit 7d402692b4b06af0459ea81129e93a5182291a27)
(cherry picked from commit b8b4ea67d8f24403a05c9a1dabc04b0ab59a9136)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f787408
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f787408
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f787408

Branch: refs/heads/branch-2.7
Commit: 7f78740885a4190e52bec16c9d46c8ba939753e8
Parents: c2d3af3
Author: Zhe Zhang <[email protected]>
Authored: Thu Aug 11 10:53:16 2016 -0700
Committer: Zhe Zhang <[email protected]>
Committed: Thu Aug 11 16:18:40 2016 -0700

----------------------------------------------------------------------
 .../hadoop/security/UserGroupInformation.java   |   2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +
 .../hadoop/hdfs/server/balancer/Balancer.java   |  23 +++-
 .../src/main/resources/hdfs-default.xml         |  45 ++++++-
 .../hdfs/server/balancer/TestBalancer.java      | 132 ++++++++++++++++++-
 5 files changed, 199 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f787408/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 8284208..7f4ae96 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1192,7 +1192,7 @@ public class UserGroupInformation {
     if (now - user.getLastLogin() < MIN_TIME_BEFORE_RELOGIN ) {
       LOG.warn("Not attempting to re-login since the last re-login was " +
           "attempted less than " + (MIN_TIME_BEFORE_RELOGIN/1000) + " seconds"+
-          " before.");
+          " before. Last Login=" + user.getLastLogin());
       return false;
     }
     return true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f787408/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e73b9da..7c96f39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -428,6 +428,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_BALANCER_MOVERTHREADS_DEFAULT = 1000;
   public static final String DFS_BALANCER_DISPATCHERTHREADS_KEY = "dfs.balancer.dispatcherThreads";
   public static final int DFS_BALANCER_DISPATCHERTHREADS_DEFAULT = 200;
+  public static final String DFS_BALANCER_KEYTAB_ENABLED_KEY = "dfs.balancer.keytab.enabled";
+  public static final boolean DFS_BALANCER_KEYTAB_ENABLED_DEFAULT = false;
+  public static final String DFS_BALANCER_ADDRESS_KEY = "dfs.balancer.address";
+  public static final String DFS_BALANCER_ADDRESS_DEFAULT= "0.0.0.0:0";
+  public static final String DFS_BALANCER_KEYTAB_FILE_KEY = "dfs.balancer.keytab.file";
+  public static final String DFS_BALANCER_KERBEROS_PRINCIPAL_KEY = "dfs.balancer.kerberos.principal";
 
   public static final String DFS_MOVER_MOVEDWINWIDTH_KEY = "dfs.mover.movedWinWidth";
   public static final long DFS_MOVER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f787408/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 10524ef..b792471 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -21,6 +21,7 @@ import static com.google.common.base.Preconditions.checkArgument;
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.text.DateFormat;
 import java.util.Arrays;
@@ -53,6 +54,9 @@ import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
@@ -578,7 +582,8 @@ public class Balancer {
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
     LOG.info("namenodes = " + namenodes);
     LOG.info("parameters = " + p);
-
+
+    checkKeytabAndInit(conf);
     System.out.println("Time Stamp Iteration# Bytes Already Moved Bytes Left To Move Bytes Being Moved");
 
     List<NameNodeConnector> connectors = Collections.emptyList();
@@ -617,6 +622,22 @@ public class Balancer {
     return ExitStatus.SUCCESS.getExitCode();
   }
 
+  private static void checkKeytabAndInit(Configuration conf)
+      throws IOException {
+    if (conf.getBoolean(DFSConfigKeys.DFS_BALANCER_KEYTAB_ENABLED_KEY,
+        DFSConfigKeys.DFS_BALANCER_KEYTAB_ENABLED_DEFAULT)) {
+      LOG.info("Keytab is configured, will login using keytab.");
+      UserGroupInformation.setConfiguration(conf);
+      String addr = conf.get(DFSConfigKeys.DFS_BALANCER_ADDRESS_KEY,
+          DFSConfigKeys.DFS_BALANCER_ADDRESS_DEFAULT);
+      InetSocketAddress socAddr = NetUtils.createSocketAddr(addr, 0,
+          DFSConfigKeys.DFS_BALANCER_ADDRESS_KEY);
+      SecurityUtil.login(conf, DFSConfigKeys.DFS_BALANCER_KEYTAB_FILE_KEY,
+          DFSConfigKeys.DFS_BALANCER_KERBEROS_PRINCIPAL_KEY,
+          socAddr.getHostName());
+    }
+  }
+
   /* Given elaspedTime in ms, return a printable string */
   private static String time2Str(long elapsedTime) {
     String unit;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f787408/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 101441c..2192354 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1340,7 +1340,7 @@
   <value></value>
   <description>
     The NameNode service principal. This is typically set to
-    nn/[email protected]. Each NameNode will subsitute _HOST with its
+    nn/[email protected]. Each NameNode will substitute _HOST with its
     own fully qualified hostname at startup. The _HOST placeholder
     allows using the same configuration setting on both NameNodes
     in an HA setup.
@@ -1362,7 +1362,7 @@
   <value></value>
   <description>
     The DataNode service principal. This is typically set to
-    dn/[email protected]. Each DataNode will subsitute _HOST with its
+    dn/[email protected]. Each DataNode will substitute _HOST with its
     own fully qualified hostname at startup. The _HOST placeholder
     allows using the same configuration setting on all DataNodes.
   </description>
@@ -1383,7 +1383,7 @@
   <value></value>
   <description>
     The JournalNode service principal. This is typically set to
-    jn/[email protected]. Each JournalNode will subsitute _HOST with its
+    jn/[email protected]. Each JournalNode will substitute _HOST with its
     own fully qualified hostname at startup. The _HOST placeholder
     allows using the same configuration setting on all JournalNodes.
   </description>
@@ -2473,4 +2473,43 @@
     reduces initial request failures after datanode restart.
   </description>
 </property>
+<property>
+  <name>dfs.balancer.keytab.enabled</name>
+  <value>false</value>
+  <description>
+    Set to true to enable login using a keytab for Kerberized Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>dfs.balancer.address</name>
+  <value>0.0.0.0:0</value>
+  <description>
+    The hostname used for a keytab based Kerberos login. Keytab based login
+    can be enabled with dfs.balancer.keytab.enabled.
+  </description>
+</property>
+
+<property>
+  <name>dfs.balancer.keytab.file</name>
+  <value></value>
+  <description>
+    The keytab file used by the Balancer to login as its
+    service principal. The principal name is configured with
+    dfs.balancer.kerberos.principal. Keytab based login can be
+    enabled with dfs.balancer.keytab.enabled.
+  </description>
+</property>
+
+<property>
+  <name>dfs.balancer.kerberos.principal</name>
+  <value></value>
+  <description>
+    The Balancer principal. This is typically set to
+    balancer/[email protected]. The Balancer will substitute _HOST with its
+    own fully qualified hostname at startup. The _HOST placeholder
+    allows using the same configuration setting on different servers.
+    Keytab based login can be enabled with dfs.balancer.keytab.enabled.
+  </description>
+</property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f787408/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 2b1f708..420c783 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -17,9 +17,30 @@
  */
 package org.apache.hadoop.hdfs.server.balancer;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BALANCER_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BALANCER_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BALANCER_KEYTAB_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BALANCER_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -31,12 +52,14 @@ import java.io.PrintWriter;
 import java.net.InetAddress;
 import java.net.URI;
 import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Properties;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.TimeoutException;
@@ -48,6 +71,7 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -66,11 +90,17 @@ import org.apache.hadoop.hdfs.server.balancer.Balancer.Parameters;
 import org.apache.hadoop.hdfs.server.balancer.Balancer.Result;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.log4j.Level;
+import org.junit.After;
 import org.junit.Test;
 
 /**
@@ -89,8 +119,70 @@ public class TestBalancer {
   final static String RACK2 = "/rack2";
   final private static String fileName = "/tmp.txt";
   final static Path filePath = new Path(fileName);
+  final static private String username = "balancer";
+  private static String principal;
+  private static File baseDir;
+  private static MiniKdc kdc;
+  private static File keytabFile;
   private MiniDFSCluster cluster;
 
+  static void initSecureConf(Configuration conf) throws Exception {
+    baseDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
+        TestBalancer.class.getSimpleName());
+    FileUtil.fullyDelete(baseDir);
+    assertTrue(baseDir.mkdirs());
+
+    Properties kdcConf = MiniKdc.createConf();
+    kdc = new MiniKdc(kdcConf, baseDir);
+    kdc.start();
+
+    SecurityUtil.setAuthenticationMethod(
+        UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
+    UserGroupInformation.setConfiguration(conf);
+    assertTrue("Expected configuration to enable security",
+        UserGroupInformation.isSecurityEnabled());
+
+    keytabFile = new File(baseDir, username + ".keytab");
+    String keytab = keytabFile.getAbsolutePath();
+    // Windows will not reverse name lookup "127.0.0.1" to "localhost".
+    String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost";
+    principal = username + "/" + krbInstance + "@" + kdc.getRealm();
+    String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm();
+    kdc.createPrincipal(keytabFile, username, username + "/" + krbInstance,
+        "HTTP/" + krbInstance);
+
+    conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, principal);
+    conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
+    conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, principal);
+    conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
+    conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
+    conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+    conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
+    conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+    conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
+
+    conf.setBoolean(DFS_BALANCER_KEYTAB_ENABLED_KEY, true);
+    conf.set(DFS_BALANCER_ADDRESS_KEY, "localhost:0");
+    conf.set(DFS_BALANCER_KEYTAB_FILE_KEY, keytab);
+    conf.set(DFS_BALANCER_KERBEROS_PRINCIPAL_KEY, principal);
+
+    String keystoresDir = baseDir.getAbsolutePath();
+    String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestBalancer.class);
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+
+    initConf(conf);
+  }
+
+  @After
+  public void shutdown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
   ClientProtocol client;
 
   static final long TIMEOUT = 40000L; //msec
@@ -806,6 +898,13 @@ public class TestBalancer {
     initConf(conf);
     oneNodeTest(conf, false);
   }
+
+  @Test(timeout = 100000)
+  public void testUnknownDatanodeSimple() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    testUnknownDatanode(conf);
+  }
 
   /* we first start a cluster and fill the cluster up to a certain size.
    * then redistribute blocks according the required distribution.
@@ -814,10 +913,8 @@ public class TestBalancer {
    * A partially filled datanode is excluded during balancing.
    * This triggers a situation where one of the block's location is unknown.
    */
-  @Test(timeout=100000)
-  public void testUnknownDatanode() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    initConf(conf);
+  private void testUnknownDatanode(Configuration conf)
+      throws IOException, InterruptedException, TimeoutException {
     long distribution[] = new long[] {50*CAPACITY/100, 70*CAPACITY/100, 0*CAPACITY/100};
     long capacities[] = new long[]{CAPACITY, CAPACITY, CAPACITY};
     String racks[] = new String[] {RACK0, RACK1, RACK1};
@@ -1453,6 +1550,33 @@ public class TestBalancer {
   }
 
   /**
+   * Test Balancer runs fine when logging in with a keytab in kerberized env.
+   * Reusing testUnknownDatanode here for basic functionality testing.
+   */
+  @Test(timeout = 300000)
+  public void testBalancerWithKeytabs() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initSecureConf(conf);
+    final UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
+        principal, keytabFile.getAbsolutePath());
+    try {
+      ugi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          // verify that balancer runs Ok.
+          testUnknownDatanode(conf);
+          // verify that UGI was logged in using keytab.
+          assertTrue(UserGroupInformation.isLoginKeytabBased());
+          return null;
+        }
+      });
+    } finally {
+      // Reset UGI so that other tests are not affected.
+      UserGroupInformation.setConfiguration(new Configuration());
+    }
+  }
+
+  /**
    * @param args
    */
   public static void main(String[] args) throws Exception {
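
For operators who want to try this change, a minimal sketch of the hdfs-site.xml settings involved is below. The four property names are the ones introduced by this patch; the keytab path and principal value are hypothetical placeholders and must match the cluster's own Kerberos setup (the Balancer substitutes _HOST with its fully qualified hostname, per the hdfs-default.xml descriptions above).

<!-- Hypothetical example only: enable keytab-based login for a long-running
     Balancer. Property names come from this patch; the keytab path and the
     realm below are placeholders for a real deployment. -->
<property>
  <name>dfs.balancer.keytab.enabled</name>
  <value>true</value>
</property>
<property>
  <name>dfs.balancer.address</name>
  <value>0.0.0.0:0</value>
</property>
<property>
  <name>dfs.balancer.keytab.file</name>
  <value>/etc/security/keytabs/balancer.keytab</value>
</property>
<property>
  <name>dfs.balancer.kerberos.principal</name>
  <value>balancer/[email protected]</value>
</property>

With these set on the host running the Balancer, checkKeytabAndInit() calls SecurityUtil.login() at startup, so a long-running hdfs balancer can refresh its Kerberos credentials from the keytab instead of depending on an externally maintained ticket cache.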
