goiri commented on a change in pull request #3346:
URL: https://github.com/apache/hadoop/pull/3346#discussion_r699774160
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
##########
@@ -113,47 +117,93 @@ public NamenodeHeartbeatService(
}
+ /**
+ * Create a new Namenode status updater.
+ *
+ * @param resolver Namenode resolver service to handle NN registration.
+ * @param nsId Identifier of the nameservice.
+ * @param nnId Identifier of the namenode in HA.
+   * @param resolvedHost Resolved hostname for this specific namenode.
+ */
+ public NamenodeHeartbeatService(
+      ActiveNamenodeResolver resolver, String nsId, String nnId, String resolvedHost) {
+ super(getNnHeartBeatServiceName(nsId, nnId));
+
+ this.resolver = resolver;
+
+ this.nameserviceId = nsId;
+    // Concatenate a unique id from the original nnId and the resolvedHost.
+ this.namenodeId = nnId + "-" + resolvedHost;
+ this.resolvedHost = resolvedHost;
+    // Save the original nnId to get the ports from the config.
+ this.originalNnId = nnId;
+
+ }
+
@Override
protected void serviceInit(Configuration configuration) throws Exception {
this.conf = DFSHAAdmin.addSecurityConfiguration(configuration);
String nnDesc = nameserviceId;
if (this.namenodeId != null && !this.namenodeId.isEmpty()) {
- this.localTarget = new NNHAServiceTarget(
- conf, nameserviceId, namenodeId);
nnDesc += "-" + namenodeId;
} else {
this.localTarget = null;
}
+ if (originalNnId == null) {
+ originalNnId = namenodeId;
+ }
+
// Get the RPC address for the clients to connect
- this.rpcAddress = getRpcAddress(conf, nameserviceId, namenodeId);
+ this.rpcAddress = getRpcAddress(conf, nameserviceId, originalNnId);
+ if (resolvedHost != null) {
+ rpcAddress = resolvedHost + ":"
+ + NetUtils.createSocketAddr(rpcAddress).getPort();
+ }
LOG.info("{} RPC address: {}", nnDesc, rpcAddress);
// Get the Service RPC address for monitoring
this.serviceAddress =
- DFSUtil.getNamenodeServiceAddr(conf, nameserviceId, namenodeId);
+ DFSUtil.getNamenodeServiceAddr(conf, nameserviceId, originalNnId);
if (this.serviceAddress == null) {
LOG.error("Cannot locate RPC service address for NN {}, " +
"using RPC address {}", nnDesc, this.rpcAddress);
this.serviceAddress = this.rpcAddress;
}
+ if (resolvedHost != null) {
Review comment:
       We do the same thing over and over for the lifeline and the others;
maybe do all of them in a single shot?
The way we extract the port might also be expensive, to be honest; creating a
socket address just to read the port back is usually bad.
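For instance, a rough sketch of what I mean (the replaceHost helper and the
lifeline/web field names below are only illustrative, not something this
patch already has):

    // Illustrative helper, not part of the patch: swap the host on a plain
    // "host:port" string without building an InetSocketAddress just to
    // read the port back.
    private static String replaceHost(String address, String newHost) {
      if (address == null || newHost == null) {
        return address;
      }
      int portSeparator = address.lastIndexOf(':');
      if (portSeparator < 0) {
        return newHost;
      }
      // Keep the original ":port" suffix; only the host part changes.
      return newHost + address.substring(portSeparator);
    }

    // serviceInit() could then rewrite every address in one place, e.g.:
    // this.rpcAddress = replaceHost(rpcAddress, resolvedHost);
    // this.serviceAddress = replaceHost(serviceAddress, resolvedHost);
    // this.lifelineAddress = replaceHost(lifelineAddress, resolvedHost);
    // this.webAddress = replaceHost(webAddress, resolvedHost);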
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
##########
@@ -426,37 +426,53 @@ static String concatSuffixes(String... suffixes) {
Collection<String> nnIds = getNameNodeIds(conf, nsId);
Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
for (String nnId : emptyAsSingletonNull(nnIds)) {
- String suffix = concatSuffixes(nsId, nnId);
- String address = checkKeysAndProcess(defaultValue, suffix, conf, keys);
- if (address != null) {
- InetSocketAddress isa = NetUtils.createSocketAddr(address);
- try {
- // Datanode should just use FQDN
- String[] resolvedHostNames = dnr
- .getAllResolvedHostnameByDomainName(isa.getHostName(), true);
- int port = isa.getPort();
- for (String hostname : resolvedHostNames) {
- InetSocketAddress inetSocketAddress = new InetSocketAddress(
- hostname, port);
- // Concat nn info with host info to make uniq ID
- String concatId;
- if (nnId == null || nnId.isEmpty()) {
- concatId = String
- .join("-", nsId, hostname, String.valueOf(port));
- } else {
- concatId = String
- .join("-", nsId, nnId, hostname, String.valueOf(port));
- }
- ret.put(concatId, inetSocketAddress);
- }
- } catch (UnknownHostException e) {
- LOG.error("Failed to resolve address: " + address);
+ ret.putAll(getResolvedAddressesForNnId(
+ conf, nsId, nnId, dnr, defaultValue, keys));
+ }
+ return ret;
+ }
+
+ public static Map<String, InetSocketAddress> getResolvedAddressesForNnId(
+ Configuration conf, String nsId, String nnId,
+ DomainNameResolver dnr, String defaultValue,
+ String... keys) {
+ String suffix = concatSuffixes(nsId, nnId);
+ String address = checkKeysAndProcess(defaultValue, suffix, conf, keys);
+ Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
+ if (address != null) {
+ InetSocketAddress isa = NetUtils.createSocketAddr(address);
+ try {
+ String[] resolvedHostNames = dnr
+ .getAllResolvedHostnameByDomainName(isa.getHostName(), true);
+ int port = isa.getPort();
+ for (String hostname : resolvedHostNames) {
+ InetSocketAddress inetSocketAddress = new InetSocketAddress(
+ hostname, port);
+ // Concat nn info with host info to make uniq ID
+ String concatId = getConcatNnId(nsId, nnId, hostname, port);
+ ret.put(concatId, inetSocketAddress);
}
+ } catch (UnknownHostException e) {
+ LOG.error("Failed to resolve address: {}", address);
}
}
return ret;
}
+  /**
+   * Concatenate the NN info with the host info to make a unique ID.
+   * This is mainly used when the configured NN is a domain record that
+   * has multiple hosts behind it.
+   */
+  static String getConcatNnId(String nsId, String nnId, String hostname, int port) {
Review comment:
Add the args to the javadoc too.
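For example, something along these lines (the wording is only a suggestion):

    /**
     * Concatenate the NN info with the host info to make a unique ID.
     * This is mainly used when the configured NN is a domain record that
     * has multiple hosts behind it.
     *
     * @param nsId Identifier of the nameservice.
     * @param nnId Identifier of the namenode in HA; may be null or empty.
     * @param hostname Resolved hostname behind the domain record.
     * @param port Port shared by the resolved hosts.
     * @return Unique ID of the form nsId-nnId-hostname-port (the nnId part
     *         is skipped when it is null or empty).
     */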
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
##########
@@ -426,37 +426,53 @@ static String concatSuffixes(String... suffixes) {
Collection<String> nnIds = getNameNodeIds(conf, nsId);
Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
for (String nnId : emptyAsSingletonNull(nnIds)) {
- String suffix = concatSuffixes(nsId, nnId);
- String address = checkKeysAndProcess(defaultValue, suffix, conf, keys);
- if (address != null) {
- InetSocketAddress isa = NetUtils.createSocketAddr(address);
- try {
- // Datanode should just use FQDN
- String[] resolvedHostNames = dnr
- .getAllResolvedHostnameByDomainName(isa.getHostName(), true);
- int port = isa.getPort();
- for (String hostname : resolvedHostNames) {
- InetSocketAddress inetSocketAddress = new InetSocketAddress(
- hostname, port);
- // Concat nn info with host info to make uniq ID
- String concatId;
- if (nnId == null || nnId.isEmpty()) {
- concatId = String
- .join("-", nsId, hostname, String.valueOf(port));
- } else {
- concatId = String
- .join("-", nsId, nnId, hostname, String.valueOf(port));
- }
- ret.put(concatId, inetSocketAddress);
- }
- } catch (UnknownHostException e) {
- LOG.error("Failed to resolve address: " + address);
+ ret.putAll(getResolvedAddressesForNnId(
Review comment:
I would extract the output of the method before doing putAll().
If something breaks, it is easier to debug when the stack trace points to the
exact line.
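i.e. something like this (the variable name is only a suggestion):

    Map<String, InetSocketAddress> resolvedAddressesForNnId =
        getResolvedAddressesForNnId(conf, nsId, nnId, dnr, defaultValue, keys);
    ret.putAll(resolvedAddressesForNnId);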
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
##########
@@ -426,37 +426,53 @@ static String concatSuffixes(String... suffixes) {
Collection<String> nnIds = getNameNodeIds(conf, nsId);
Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
for (String nnId : emptyAsSingletonNull(nnIds)) {
- String suffix = concatSuffixes(nsId, nnId);
- String address = checkKeysAndProcess(defaultValue, suffix, conf, keys);
- if (address != null) {
- InetSocketAddress isa = NetUtils.createSocketAddr(address);
- try {
- // Datanode should just use FQDN
- String[] resolvedHostNames = dnr
- .getAllResolvedHostnameByDomainName(isa.getHostName(), true);
- int port = isa.getPort();
- for (String hostname : resolvedHostNames) {
- InetSocketAddress inetSocketAddress = new InetSocketAddress(
- hostname, port);
- // Concat nn info with host info to make uniq ID
- String concatId;
- if (nnId == null || nnId.isEmpty()) {
- concatId = String
- .join("-", nsId, hostname, String.valueOf(port));
- } else {
- concatId = String
- .join("-", nsId, nnId, hostname, String.valueOf(port));
- }
- ret.put(concatId, inetSocketAddress);
- }
- } catch (UnknownHostException e) {
- LOG.error("Failed to resolve address: " + address);
+ ret.putAll(getResolvedAddressesForNnId(
+ conf, nsId, nnId, dnr, defaultValue, keys));
+ }
+ return ret;
+ }
+
+ public static Map<String, InetSocketAddress> getResolvedAddressesForNnId(
+ Configuration conf, String nsId, String nnId,
+ DomainNameResolver dnr, String defaultValue,
+ String... keys) {
+ String suffix = concatSuffixes(nsId, nnId);
+ String address = checkKeysAndProcess(defaultValue, suffix, conf, keys);
+ Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
+ if (address != null) {
+ InetSocketAddress isa = NetUtils.createSocketAddr(address);
+ try {
+ String[] resolvedHostNames = dnr
+ .getAllResolvedHostnameByDomainName(isa.getHostName(), true);
+ int port = isa.getPort();
+ for (String hostname : resolvedHostNames) {
+ InetSocketAddress inetSocketAddress = new InetSocketAddress(
+ hostname, port);
+ // Concat nn info with host info to make uniq ID
+ String concatId = getConcatNnId(nsId, nnId, hostname, port);
+ ret.put(concatId, inetSocketAddress);
}
+ } catch (UnknownHostException e) {
+ LOG.error("Failed to resolve address: {}", address);
}
}
return ret;
}
+  /**
+   * Concatenate the NN info with the host info to make a unique ID.
+   * This is mainly used when the configured NN is a domain record that
+   * has multiple hosts behind it.
+   */
+  static String getConcatNnId(String nsId, String nnId, String hostname, int port) {
Review comment:
private?
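i.e., if nothing outside this class needs it, just a sketch:

    private static String getConcatNnId(String nsId, String nnId,
        String hostname, int port) {

If a test needs to call it, keeping it package-private with a
@VisibleForTesting annotation would also work.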