Author: szetszwo
Date: Tue Apr 17 22:28:48 2012
New Revision: 1327311
URL: http://svn.apache.org/viewvc?rev=1327311&view=rev
Log:
HDFS-2652. Add support for host-based delegation tokens. Contributed by Daryn Sharp
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1327311&r1=1327310&r2=1327311&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Apr 17 22:28:48 2012
@@ -20,6 +20,9 @@ Release 0.23.3 - UNRELEASED
HDFS-3136. Remove SLF4J dependency as HDFS does not need it to fix
unnecessary warnings. (Jason Lowe via suresh)
+ HDFS-2652. Add support for host-based delegation tokens. (Daryn Sharp via
+ szetszwo)
+
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java?rev=1327311&r1=1327310&r2=1327311&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java Tue Apr 17 22:28:48 2012
@@ -30,6 +30,7 @@ import java.security.PrivilegedException
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.TimeZone;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -48,7 +49,6 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
import org.apache.hadoop.hdfs.web.URLUtils;
import org.apache.hadoop.io.Text;
@@ -168,10 +168,7 @@ public class HftpFileSystem extends File
protected void initDelegationToken() throws IOException {
// look for hftp token, then try hdfs
- Token<?> token = selectHftpDelegationToken();
- if (token == null) {
- token = selectHdfsDelegationToken();
- }
+ Token<?> token = selectDelegationToken();
// if we don't already have a token, go get one over https
boolean createdToken = false;
@@ -192,14 +189,8 @@ public class HftpFileSystem extends File
}
}
- protected Token<DelegationTokenIdentifier> selectHftpDelegationToken() {
- Text serviceName = SecurityUtil.buildTokenService(nnSecureAddr);
- return hftpTokenSelector.selectToken(serviceName, ugi.getTokens());
- }
-
- protected Token<DelegationTokenIdentifier> selectHdfsDelegationToken() {
- return DelegationTokenSelector.selectHdfsDelegationToken(
- nnAddr, ugi, getConf());
+ protected Token<DelegationTokenIdentifier> selectDelegationToken() {
+ return hftpTokenSelector.selectToken(getUri(), ugi.getTokens(), getConf());
}
@@ -699,9 +690,22 @@ public class HftpFileSystem extends File
private static class HftpDelegationTokenSelector
extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
+ private static final DelegationTokenSelector hdfsTokenSelector =
+ new DelegationTokenSelector();
public HftpDelegationTokenSelector() {
super(TOKEN_KIND);
}
+
+ Token<DelegationTokenIdentifier> selectToken(URI nnUri,
+ Collection<Token<?>> tokens, Configuration conf) {
+ Token<DelegationTokenIdentifier> token =
+ selectToken(SecurityUtil.buildTokenService(nnUri), tokens);
+ if (token == null) {
+ // try to get a HDFS token
+ token = hdfsTokenSelector.selectToken(nnUri, tokens, conf);
+ }
+ return token;
+ }
}
}
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java?rev=1327311&r1=1327310&r2=1327311&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java Tue Apr 17 22:28:48 2012
@@ -17,7 +17,8 @@
*/
package org.apache.hadoop.hdfs.security.token.delegation;
-import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.Collection;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -25,7 +26,6 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
@@ -37,27 +37,36 @@ public class DelegationTokenSelector
extends AbstractDelegationTokenSelector<DelegationTokenIdentifier>{
public static final String SERVICE_NAME_KEY = "hdfs.service.host_";
- private static final DelegationTokenSelector INSTANCE = new DelegationTokenSelector();
-
- /** Select the delegation token for hdfs from the ugi. */
- public static Token<DelegationTokenIdentifier> selectHdfsDelegationToken(
- final InetSocketAddress nnAddr, final UserGroupInformation ugi,
+ /**
+ * Select the delegation token for hdfs. The port will be rewritten to
+ * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port.
+ * This method should only be called by non-hdfs filesystems that do not
+ * use the rpc port to acquire tokens. Ex. webhdfs, hftp
+ * @param nnUri of the remote namenode
+ * @param tokens as a collection
+ * @param conf hadoop configuration
+ * @return Token
+ */
+ public Token<DelegationTokenIdentifier> selectToken(
+ final URI nnUri, Collection<Token<?>> tokens,
final Configuration conf) {
// this guesses the remote cluster's rpc service port.
// the current token design assumes it's the same as the local cluster's
// rpc port unless a config key is set. there should be a way to automatic
// and correctly determine the value
- final String key = SERVICE_NAME_KEY + SecurityUtil.buildTokenService(nnAddr);
- final String nnServiceName = conf.get(key);
-
+ Text serviceName = SecurityUtil.buildTokenService(nnUri);
+ final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);
+
int nnRpcPort = NameNode.DEFAULT_PORT;
if (nnServiceName != null) {
nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
}
- final Text serviceName = SecurityUtil.buildTokenService(
- new InetSocketAddress(nnAddr.getHostName(), nnRpcPort));
- return INSTANCE.selectToken(serviceName, ugi.getTokens());
+ // use original hostname from the uri to avoid unintentional host resolving
+ serviceName = SecurityUtil.buildTokenService(
+ NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort));
+
+ return selectToken(serviceName, tokens);
}
public DelegationTokenSelector() {
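For readers tracing the reworked selection logic, the standalone sketch below mirrors what the new DelegationTokenSelector.selectToken(URI, Collection, Configuration) does with the port: look up hdfs.service.host_<service> in the configuration, fall back to the default namenode RPC port otherwise, rebuild the service string from the unresolved URI host, and then match a token against it. It is only an approximation under stated assumptions: the HostBasedTokenLookupSketch, FakeToken, and pickHdfsToken names are hypothetical, plain String/Map stand-ins replace Hadoop's Text, Token, and Configuration types, and the configured value is assumed to be in host:port form.

import java.net.URI;
import java.util.Collection;
import java.util.List;
import java.util.Map;

public class HostBasedTokenLookupSketch {
  // Stand-in for the (kind, service) pair carried by a Hadoop Token.
  static final class FakeToken {
    final String kind;     // e.g. "HDFS_DELEGATION_TOKEN"
    final String service;  // e.g. "localhost:8020"
    FakeToken(String kind, String service) { this.kind = kind; this.service = service; }
  }

  static final int DEFAULT_NN_RPC_PORT = 8020;  // stands in for NameNode.DEFAULT_PORT

  // Rewrite the port, rebuild the service from the URI host, then match by kind and service.
  static FakeToken pickHdfsToken(URI nnUri, Collection<FakeToken> tokens,
                                 Map<String, String> conf) {
    String uriService = nnUri.getHost() + ":" + nnUri.getPort();
    String mapped = conf.get("hdfs.service.host_" + uriService);
    int rpcPort = DEFAULT_NN_RPC_PORT;
    if (mapped != null) {
      // assumes the configured value is host:port; the real code parses it with NetUtils
      rpcPort = Integer.parseInt(mapped.substring(mapped.indexOf(':') + 1));
    }
    // keep the original hostname from the URI to avoid unintentional host resolving
    String rewritten = nnUri.getHost() + ":" + rpcPort;
    for (FakeToken t : tokens) {
      if ("HDFS_DELEGATION_TOKEN".equals(t.kind) && rewritten.equals(t.service)) {
        return t;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    Collection<FakeToken> ugiTokens =
        List.of(new FakeToken("HDFS_DELEGATION_TOKEN", "localhost:8020"));
    FakeToken t = pickHdfsToken(URI.create("webhdfs://localhost:50070"),
                                ugiTokens, Map.of());
    System.out.println(t != null ? "matched " + t.service : "no match");  // matched localhost:8020
  }
}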
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1327311&r1=1327310&r2=1327311&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Tue Apr 17 22:28:48 2012
@@ -29,6 +29,7 @@ import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
+import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
@@ -117,8 +118,8 @@ public class WebHdfsFileSystem extends F
/** Delegation token kind */
public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
/** Token selector */
- public static final AbstractDelegationTokenSelector<DelegationTokenIdentifier> DT_SELECTOR
- = new AbstractDelegationTokenSelector<DelegationTokenIdentifier>(TOKEN_KIND) {};
+ public static final WebHdfsDelegationTokenSelector DT_SELECTOR
+ = new WebHdfsDelegationTokenSelector();
private static DelegationTokenRenewer<WebHdfsFileSystem> DT_RENEWER = null;
@@ -164,7 +165,7 @@ public class WebHdfsFileSystem extends F
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
- this.nnAddr = NetUtils.createSocketAddr(uri.toString());
+ this.nnAddr = NetUtils.createSocketAddrForHost(uri.getHost(), uri.getPort());
this.workingDir = getHomeDirectory();
if (UserGroupInformation.isSecurityEnabled()) {
@@ -174,12 +175,7 @@ public class WebHdfsFileSystem extends F
protected void initDelegationToken() throws IOException {
// look for webhdfs token, then try hdfs
- final Text serviceName = SecurityUtil.buildTokenService(nnAddr);
- Token<?> token = DT_SELECTOR.selectToken(serviceName, ugi.getTokens());
- if (token == null) {
- token = DelegationTokenSelector.selectHdfsDelegationToken(
- nnAddr, ugi, getConf());
- }
+ Token<?> token = selectDelegationToken();
//since we don't already have a token, go get one
boolean createdToken = false;
@@ -200,6 +196,10 @@ public class WebHdfsFileSystem extends F
}
}
+ protected Token<DelegationTokenIdentifier> selectDelegationToken() {
+ return DT_SELECTOR.selectToken(getUri(), ugi.getTokens(), getConf());
+ }
+
@Override
protected int getDefaultPort() {
return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
@@ -845,4 +845,24 @@ public class WebHdfsFileSystem extends F
}
}
}
+
+ private static class WebHdfsDelegationTokenSelector
+ extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
+ private static final DelegationTokenSelector hdfsTokenSelector =
+ new DelegationTokenSelector();
+
+ public WebHdfsDelegationTokenSelector() {
+ super(TOKEN_KIND);
+ }
+
+ Token<DelegationTokenIdentifier> selectToken(URI nnUri,
+ Collection<Token<?>> tokens, Configuration conf) {
+ Token<DelegationTokenIdentifier> token =
+ selectToken(SecurityUtil.buildTokenService(nnUri), tokens);
+ if (token == null) {
+ token = hdfsTokenSelector.selectToken(nnUri, tokens, conf);
+ }
+ return token;
+ }
+ }
}
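Both new inner selectors, HftpDelegationTokenSelector above and WebHdfsDelegationTokenSelector here, apply the same two-step ordering: first try a token of the filesystem's own kind keyed by the URI's service string, and only if none is found fall back to an HDFS RPC delegation token via DelegationTokenSelector. A minimal illustration of that precedence follows; the TwoStepTokenSelectionSketch and TokenStore names, and the plain strings used for kinds and services, are hypothetical stand-ins for Hadoop's Token/Text machinery.

import java.util.LinkedHashMap;
import java.util.Map;

public class TwoStepTokenSelectionSketch {
  // Hypothetical store keyed by "kind|service", standing in for the tokens held by a UGI.
  static final class TokenStore {
    private final Map<String, String> tokens = new LinkedHashMap<>();
    void add(String kind, String service, String name) { tokens.put(kind + "|" + service, name); }
    String find(String kind, String service) { return tokens.get(kind + "|" + service); }
  }

  // Own kind first, HDFS delegation kind as the fallback.
  static String select(TokenStore store, String ownKind, String httpService, String rpcService) {
    String token = store.find(ownKind, httpService);
    if (token == null) {
      token = store.find("HDFS_DELEGATION_TOKEN", rpcService);
    }
    return token;
  }

  public static void main(String[] args) {
    TokenStore store = new TokenStore();
    store.add("HDFS_DELEGATION_TOKEN", "localhost:8020", "hdfs-token");
    // Only an HDFS token is present, so the fallback wins.
    System.out.println(select(store, "WEBHDFS delegation", "localhost:50070", "localhost:8020"));
    store.add("WEBHDFS delegation", "localhost:50070", "webhdfs-token");
    // With a token of the filesystem's own kind present, it is favored over the HDFS token.
    System.out.println(select(store, "WEBHDFS delegation", "localhost:50070", "localhost:8020"));
  }
}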
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1327311&r1=1327310&r2=1327311&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Apr 17 22:28:48 2012
@@ -68,6 +68,7 @@ import org.apache.hadoop.net.DNSToSwitch
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.StaticMapping;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
@@ -902,16 +903,14 @@ public class MiniDFSCluster {
if(dn == null)
throw new IOException("Cannot start DataNode in "
+ dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
- //NOTE: the following is true if and only if:
- // hadoop.security.token.service.use_ip=true
- //since the HDFS does things based on IP:port, we need to add the mapping
- //for IP:port to rackId
- String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
+ //since the HDFS does things based on host|ip:port, we need to add the
+ //mapping for the service to rackId
+ String service =
+ SecurityUtil.buildTokenService(dn.getSelfAddr()).toString();
if (racks != null) {
- int port = dn.getSelfAddr().getPort();
- LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
+ LOG.info("Adding node with service : " + service +
" to rack " + racks[i-curDatanodesNum]);
- StaticMapping.addNodeToRack(ipAddr + ":" + port,
+ StaticMapping.addNodeToRack(service,
racks[i-curDatanodesNum]);
}
dn.runDatanodeDaemon();
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java?rev=1327311&r1=1327310&r2=1327311&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java Tue Apr 17 22:28:48 2012
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtilTestHelper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
@@ -66,4 +67,59 @@ public class TestHftpDelegationToken {
renewToken.setAccessible(true);
assertSame("wrong token", token, renewToken.get(fs));
}
+
+ @Test
+ public void testSelectHdfsDelegationToken() throws Exception {
+ SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
+ Configuration conf = new Configuration();
+ URI hftpUri = URI.create("hftp://localhost:0");
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ Token<?> token = null;
+
+ // test fallback to hdfs token
+ Token<?> hdfsToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+ new Text("127.0.0.1:8020"));
+ ugi.addToken(hdfsToken);
+
+ HftpFileSystem fs = (HftpFileSystem) FileSystem.get(hftpUri, conf);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(hdfsToken, token);
+
+ // test hftp is favored over hdfs
+ Token<?> hftpToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ HftpFileSystem.TOKEN_KIND, new Text("127.0.0.1:0"));
+ ugi.addToken(hftpToken);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(hftpToken, token);
+
+ // switch to using host-based tokens, no token should match
+ SecurityUtilTestHelper.setTokenServiceUseIp(false);
+ token = fs.selectDelegationToken();
+ assertNull(token);
+
+ // test fallback to hdfs token
+ hdfsToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+ new Text("localhost:8020"));
+ ugi.addToken(hdfsToken);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(hdfsToken, token);
+
+ // test hftp is favored over hdfs
+ hftpToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ HftpFileSystem.TOKEN_KIND, new Text("localhost:0"));
+ ugi.addToken(hftpToken);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(hftpToken, token);
+ }
}
\ No newline at end of file
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java?rev=1327311&r1=1327310&r2=1327311&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java Tue Apr 17 22:28:48 2012
@@ -49,6 +49,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.security.SaslInputStream;
import org.apache.hadoop.security.SaslRpcClient;
import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
@@ -105,10 +106,8 @@ public class TestClientProtocolWithDeleg
DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner,
owner, null);
Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
dtId, sm);
- Text host = new Text(addr.getAddress().getHostAddress() + ":"
- + addr.getPort());
- token.setService(host);
- LOG.info("Service IP address for token is " + host);
+ SecurityUtil.setTokenService(token, addr);
+ LOG.info("Service for token is " + token.getService());
current.addToken(token);
current.doAs(new PrivilegedExceptionAction<Object>() {
@Override
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java?rev=1327311&r1=1327310&r2=1327311&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java Tue Apr 17 22:28:48 2012
@@ -34,10 +34,16 @@ import org.apache.hadoop.hdfs.web.resour
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtilTestHelper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
import org.junit.Assert;
import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.mockito.Mockito.mock;
public class TestWebHdfsUrl {
@@ -90,4 +96,60 @@ public class TestWebHdfsUrl {
private String generateUrlQueryPrefix(HttpOpParam.Op op, String username) {
return "op=" + op.toString() + "&user.name=" + username;
}
+
+ @Test
+ public void testSelectDelegationToken() throws Exception {
+ SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
+ Configuration conf = new Configuration();
+ URI webHdfsUri = URI.create("webhdfs://localhost:0");
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ Token<?> token = null;
+
+ // test fallback to hdfs token
+ Token<?> hdfsToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+ new Text("127.0.0.1:8020"));
+ ugi.addToken(hdfsToken);
+
+ WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(webHdfsUri, conf);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(hdfsToken, token);
+
+ // test webhdfs is favored over hdfs
+ Token<?> webHdfsToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ WebHdfsFileSystem.TOKEN_KIND, new Text("127.0.0.1:0"));
+ ugi.addToken(webHdfsToken);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(webHdfsToken, token);
+
+ // switch to using host-based tokens, no token should match
+ SecurityUtilTestHelper.setTokenServiceUseIp(false);
+ token = fs.selectDelegationToken();
+ assertNull(token);
+
+ // test fallback to hdfs token
+ hdfsToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+ new Text("localhost:8020"));
+ ugi.addToken(hdfsToken);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(hdfsToken, token);
+
+ // test webhdfs is favored over hdfs
+ webHdfsToken = new Token<TokenIdentifier>(
+ new byte[0], new byte[0],
+ WebHdfsFileSystem.TOKEN_KIND, new Text("localhost:0"));
+ ugi.addToken(webHdfsToken);
+ token = fs.selectDelegationToken();
+ assertNotNull(token);
+ assertEquals(webHdfsToken, token);
+ }
+
}