[ https://issues.apache.org/jira/browse/HDFS-13972?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16693348#comment-16693348 ]
Brahma Reddy Battula commented on HDFS-13972: --------------------------------------------- [~crh] thanks for working on this. The approach looks good to me apart from the following. {quote}Also as discussed in previous threads, we can do optimizations to re-use namenode code, but have kept it simple for now. {quote} Yes, we can optimize. JspHelper.java and UserProvider.java will also be loaded in the classpath, and they are similar (except for the following) to RouterJSPHelper (RJH) and RouterUserProvider (RUP). You might have had success in your test because RJH and RUP happened to be loaded first in the classpath. * We can have one interface for verifyToken(...) which can be implemented by both NameNode and Router (like below), so that we do not need RJH and RUP. {code:java} +package org.apache.hadoop.hdfs.server.common; + +import java.io.IOException; + +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; + +public interface TokenVerifier<T extends AbstractDelegationTokenIdentifier> { + void verifyToken(T t, byte[] password) throws IOException; +} --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -176,10 +176,10 @@ private static UserGroupInformation getTokenUGI(ServletContext context, DelegationTokenIdentifier id = new DelegationTokenIdentifier(); id.readFields(in); if (context != null) { - final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context); + final TokenVerifier nn = NameNodeHttpServer.getTokenVerifierFromContext(context); if (nn != null) { // Verify the token. 
- nn.getNamesystem().verifyToken(id, token.getPassword()); + nn.verifyToken(id, token.getPassword()); } --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap; import org.apache.hadoop.hdfs.server.aliasmap.InMemoryLevelDBAliasMapServer; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; @@ -55,6 +56,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.MetricsLoggerTask; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; +import org.apache.hadoop.hdfs.server.common.TokenVerifier; import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState; import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby; import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; @@ -208,7 +210,7 @@ **********************************************************/ @InterfaceAudience.Private public class NameNode extends ReconfigurableBase implements - NameNodeStatusMXBean { + NameNodeStatusMXBean, TokenVerifier<DelegationTokenIdentifier> { static{ HdfsConfiguration.init(); } @@ -2202,4 +2204,10 @@ String reconfigureSPSModeEvent(String newVal, String property) protected Configuration getNewConf() { return new HdfsConfiguration(); } + + @Override + public void verifyToken(DelegationTokenIdentifier tokenId, byte[] password) + throws IOException { + namesystem.verifyToken(tokenId, password); + } } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 1bc43b896ae..e199a10bd5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -36,7 +36,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.common.JspHelper; +import org.apache.hadoop.hdfs.server.common.TokenVerifier; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; import org.apache.hadoop.hdfs.web.AuthFilter; @@ -308,6 +310,10 @@ public static NameNode getNameNodeFromContext(ServletContext context) { return (NameNode)context.getAttribute(NAMENODE_ATTRIBUTE_KEY); } + public static TokenVerifier getTokenVerifierFromContext(ServletContext context) { + return (TokenVerifier) context.getAttribute(NAMENODE_ATTRIBUTE_KEY); + } + --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -33,6 +33,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.common.TokenVerifier; import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics; import 
org.apache.hadoop.hdfs.server.federation.metrics.NamenodeBeanMetrics; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; @@ -70,7 +72,8 @@ */ @InterfaceAudience.Private @InterfaceStability.Evolving -public class Router extends CompositeService { +public class Router extends CompositeService + implements TokenVerifier<DelegationTokenIdentifier> { private static final Logger LOG = LoggerFactory.getLogger(Router.class); @@ -673,4 +676,10 @@ RouterQuotaUpdateService getQuotaCacheUpdateService() { RouterSafemodeService getSafemodeService() { return this.safemodeService; } + + @Override + public void verifyToken(DelegationTokenIdentifier tokenId, byte[] password) + throws IOException { + getRpcServer().getRouterSecurityManager().verifyToken(tokenId, password); + }{code} * I think, while storing tokens we can also put them in currentTokens, so that we do not need to connect to ZK while getting the token. Maybe verify this change in your cluster and handle it in a separate Jira, so that it can be committed to trunk also. 
{code:java} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java index 88bd29bd730..741163bc116 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java @@ -807,6 +807,7 @@ protected void storeToken(TokenIdent ident, DelegationTokenInformation tokenInfo) throws IOException { try { addOrUpdateToken(ident, tokenInfo, false); + currentTokens.put(ident, tokenInfo); }catch (Exception e){ throw new RuntimeException(e); } {code} * Need to see the load on ZK when multiple delegation tokens are created (which creates many ZK connections). Did you keep an eye on that? * Better to have some unit tests. > RBF: Support for Delegation Token (WebHDFS) > ------------------------------------------- > > Key: HDFS-13972 > URL: https://issues.apache.org/jira/browse/HDFS-13972 > Project: Hadoop HDFS > Issue Type: Sub-task > Reporter: Íñigo Goiri > Assignee: CR Hota > Priority: Major > Attachments: HDFS-13972-HDFS-13891.001.patch > > > HDFS Router should support issuing HDFS delegation tokens through WebHDFS. -- This message was sent by Atlassian JIRA (v7.6.3#76005) --------------------------------------------------------------------- To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org