This is an automated email from the ASF dual-hosted git repository.

swagle pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 70d0c08  [AMBARI-23502] Format Namenode step in NN Fed wizard should pass in HDFS ClusterId as argument. (aonishuk) (#924)
70d0c08 is described below

commit 70d0c088a4f0dbe232b926e9a48393d2a9cf059d
Author: aonishuk <aonis...@hortonworks.com>
AuthorDate: Fri Apr 6 22:15:38 2018 +0300

    [AMBARI-23502] Format Namenode step in NN Fed wizard should pass in HDFS ClusterId as argument. (aonishuk) (#924)
---
 .../libraries/functions/namenode_ha_utils.py       | 65 +++++++++++++++-------
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py     |  5 +-
 2 files changed, 48 insertions(+), 22 deletions(-)
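
Note on the core refactor below: the single-purpose JMX_URI_FRAGMENT constant is split into a reusable template (JMX_URI_DEFAULT) plus named beans, so the same NameNode address discovery can query either HA state (FSNamesystem bean) or the cluster ID (NameNodeInfo bean). A minimal sketch of the two-stage formatting this relies on (host and port are illustrative):

    # The first format() fills protocol and host; the escaped {{0}} survives
    # as a literal {0} placeholder, which a second format() later fills with
    # the JMX bean name.
    JMX_URI_DEFAULT = "{0}://{1}/jmx?qry={{0}}"
    JMX_BEAN_NN_INFO = "Hadoop:service=NameNode,name=NameNodeInfo"

    template = JMX_URI_DEFAULT.format("http", "nn1.example.com:50070")
    # -> http://nn1.example.com:50070/jmx?qry={0}
    jmx_uri = template.format(JMX_BEAN_NN_INFO)
    # -> http://nn1.example.com:50070/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo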

diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
index ae1a681..4d51e69 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
@@ -40,7 +40,9 @@ NAMENODE_HTTP_FRAGMENT = 'dfs.namenode.http-address.{0}.{1}'
 NAMENODE_HTTPS_FRAGMENT = 'dfs.namenode.https-address.{0}.{1}'
 NAMENODE_RPC_FRAGMENT = 'dfs.namenode.rpc-address.{0}.{1}'
 NAMENODE_RPC_NON_HA = 'dfs.namenode.rpc-address'
-JMX_URI_FRAGMENT = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem"
+JMX_URI_DEFAULT = "{0}://{1}/jmx?qry={{0}}"
+JMX_BEAN_FS = "Hadoop:service=NameNode,name=FSNamesystem"
+JMX_BEAN_NN_INFO = "Hadoop:service=NameNode,name=NameNodeInfo"
 INADDR_ANY = '0.0.0.0'
 
 class NoActiveNamenodeException(Fail):
@@ -93,6 +95,30 @@ def _get_namenode_states_noretries_single_ns(hdfs_site, name_service, security_e
   standby_namenodes = []
   unknown_namenodes = []
 
+  for nn_unique_id, address, jmx_uri in all_jmx_namenode_addresses(hdfs_site, name_service):
+    is_https_enabled = is_https_enabled_in_hdfs(hdfs_site['dfs.http.policy'], hdfs_site['dfs.https.enable'])
+    jmx_uri = jmx_uri.format(JMX_BEAN_FS)
+    state = get_value_from_jmx(jmx_uri, 'tag.HAState', security_enabled, run_user, is_https_enabled, last_retry)
+    # If JMX parsing failed
+    if not state:
+      check_service_cmd = "hdfs haadmin -ns {0} -getServiceState {1}".format(name_service, nn_unique_id)
+      code, out = shell.call(check_service_cmd, logoutput=True, user=run_user)
+      if code == 0 and out:
+        if HDFS_NN_STATE_STANDBY in out:
+          state = HDFS_NN_STATE_STANDBY
+        elif HDFS_NN_STATE_ACTIVE in out:
+          state = HDFS_NN_STATE_ACTIVE
+
+    if state == HDFS_NN_STATE_ACTIVE:
+      active_namenodes.append((nn_unique_id, address))
+    elif state == HDFS_NN_STATE_STANDBY:
+      standby_namenodes.append((nn_unique_id, address))
+    else:
+      unknown_namenodes.append((nn_unique_id, address))
+
+  return active_namenodes, standby_namenodes, unknown_namenodes
+
+def all_jmx_namenode_addresses(hdfs_site, name_service):
   nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
 
   # now we have something like 'nn1,nn2,nn3,nn4'
@@ -119,28 +145,25 @@ def _get_namenode_states_noretries_single_ns(hdfs_site, name_service, security_e
           rpc_host = rpc_value.split(":")[0]
           value = value.replace(INADDR_ANY, rpc_host)
 
-      jmx_uri = JMX_URI_FRAGMENT.format(protocol, value)
-
-      state = get_value_from_jmx(jmx_uri, 'tag.HAState', security_enabled, run_user, is_https_enabled, last_retry)
-      # If JMX parsing failed
-      if not state:
-        check_service_cmd = "hdfs haadmin -ns {0} -getServiceState {1}".format(name_service, nn_unique_id)
-        code, out = shell.call(check_service_cmd, logoutput=True, user=run_user)
-        if code == 0 and out:
-          if HDFS_NN_STATE_STANDBY in out:
-            state = HDFS_NN_STATE_STANDBY
-          elif HDFS_NN_STATE_ACTIVE in out:
-            state = HDFS_NN_STATE_ACTIVE
-
-      if state == HDFS_NN_STATE_ACTIVE:
-        active_namenodes.append((nn_unique_id, value))
-      elif state == HDFS_NN_STATE_STANDBY:
-        standby_namenodes.append((nn_unique_id, value))
-      else:
-        unknown_namenodes.append((nn_unique_id, value))
+    jmx_uri = JMX_URI_DEFAULT.format(protocol, value)
 
-  return active_namenodes, standby_namenodes, unknown_namenodes
+    yield nn_unique_id, value, jmx_uri
+
+
+def get_hdfs_cluster_id_from_jmx(hdfs_site, security_enabled, run_user):
+  name_services = get_nameservices(hdfs_site)
+  for name_service in name_services:
+    for nn_unique_id, address, jmx_uri in all_jmx_namenode_addresses(hdfs_site, name_service):
+      jmx_uri = jmx_uri.format(JMX_BEAN_NN_INFO)
+      is_https_enabled = is_https_enabled_in_hdfs(hdfs_site['dfs.http.policy'], hdfs_site['dfs.https.enable'])
+      state = get_value_from_jmx(jmx_uri, 'ClusterId', security_enabled, run_user, is_https_enabled)
+
+      if state:
+        return state
+
+      Logger.info("Cannot get clusterId from {0}".format(jmx_uri))
 
+  raise Fail("Cannot get clusterId from jmx, since none of the namenodes is running/accessible via jmx.")
 
 def _is_ha_config(hdfs_site):
   """
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index e4a9845..5506512 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -51,6 +51,7 @@ from hdfs_namenode import namenode, wait_for_safemode_off, refreshProxyUsers, fo
 from hdfs import hdfs, reconfig
 import hdfs_rebalance
 from utils import initiate_safe_zkfc_failover, get_hdfs_binary, get_dfsadmin_base_command
+from resource_management.libraries.functions.namenode_ha_utils import get_hdfs_cluster_id_from_jmx
 
 # The hash algorithm to use to generate digests/hashes
 HASH_ALGORITHM = hashlib.sha224
@@ -103,8 +104,10 @@ class NameNode(Script):
                 user=params.hdfs_user
         )
 
+    hdfs_cluster_id = get_hdfs_cluster_id_from_jmx(params.hdfs_site, params.security_enabled, params.hdfs_user)
+
     # this is run on a new namenode, format needs to be forced
-    Execute(format("hdfs --config {hadoop_conf_dir} namenode -format 
-nonInteractive"),
+    Execute(format("hdfs --config {hadoop_conf_dir} namenode -format 
-nonInteractive -clusterId {hdfs_cluster_id}"),
             user = params.hdfs_user,
             path = [params.hadoop_bin_dir],
             logoutput=True
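
Net effect: before this patch, formatting the NameNode on a newly added host let "hdfs namenode -format" mint a brand-new cluster ID, which would not match the ID already shared by the rest of the federation. The wizard now reads the existing ClusterId over JMX and pins the new NameNode to it, so the rendered command looks roughly like (conf dir and ID illustrative):

    hdfs --config /etc/hadoop/conf namenode -format -nonInteractive -clusterId CID-1234abcd-...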

-- 
To stop receiving notification emails like this one, please contact swa...@apache.org.
