Repository: ambari
Updated Branches:
  refs/heads/trunk 9bcd3f378 -> 21d4aff0b


AMBARI-19344 - HOU Fails To Restart NameNode in non-HA Cluster (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/21d4aff0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/21d4aff0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/21d4aff0

Branch: refs/heads/trunk
Commit: 21d4aff0bb121e0092aee964e43ff4d8a0dbcde9
Parents: 9bcd3f3
Author: Jonathan Hurley <[email protected]>
Authored: Thu Jan 5 16:28:01 2017 -0500
Committer: Jonathan Hurley <[email protected]>
Committed: Fri Jan 6 15:42:13 2017 -0500

----------------------------------------------------------------------
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  | 15 +++++---
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  | 32 ++++++++---------
 .../3.0.0.3.0/package/scripts/hdfs_namenode.py  | 37 +++++++++++---------
 .../HDFS/3.0.0.3.0/package/scripts/namenode.py  | 36 +++++++++----------
 .../HDP/2.5/upgrades/host-ordered-upgrade.xml   | 13 -------
 .../HDP/2.6/upgrades/host-ordered-upgrade.xml   | 13 -------
 .../python/stacks/2.0.6/HDFS/test_namenode.py   | 10 ++++--
 7 files changed, 71 insertions(+), 85 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/21d4aff0/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 96160db..cc03bb3 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -19,6 +19,8 @@ limitations under the License.
 import os.path
 import time
 
+from ambari_commons import constants
+
 from resource_management.core import shell
 from resource_management.core.source import Template
 from resource_management.core.resources.system import File, Execute, Directory
@@ -120,18 +122,18 @@ def namenode(action=None, hdfs_binary=None, 
do_format=True, upgrade_type=None,
         if not success:
           raise Fail("Could not bootstrap standby namenode")
 
-    if upgrade_type == "rolling" and params.dfs_ha_enabled:
+    if upgrade_type == constants.UPGRADE_TYPE_ROLLING and 
params.dfs_ha_enabled:
       # Most likely, ZKFC is up since RU will initiate the failover command. 
However, if that failed, it would have tried
       # to kill ZKFC manually, so we need to start it if not already running.
       safe_zkfc_op(action, env)
 
     options = ""
-    if upgrade_type == "rolling":
+    if upgrade_type == constants.UPGRADE_TYPE_ROLLING:
       if params.upgrade_direction == Direction.UPGRADE:
         options = "-rollingUpgrade started"
       elif params.upgrade_direction == Direction.DOWNGRADE:
         options = "-rollingUpgrade downgrade"
-    elif upgrade_type == "nonrolling":
+    elif upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
       is_previous_image_dir = is_previous_fs_image()
       Logger.info("Previous file system image dir present is 
{0}".format(str(is_previous_image_dir)))
 
@@ -139,6 +141,9 @@ def namenode(action=None, hdfs_binary=None, do_format=True, 
upgrade_type=None,
         options = "-rollingUpgrade started"
       elif params.upgrade_direction == Direction.DOWNGRADE:
         options = "-rollingUpgrade downgrade"
+    elif upgrade_type == constants.UPGRADE_TYPE_HOST_ORDERED:
+      # nothing special to do for HOU - should be very close to a normal 
restart
+      pass
     elif upgrade_type is None and upgrade_suspended is True:
       # the rollingUpgrade flag must be passed in during a suspended upgrade 
when starting NN
       if os.path.exists(namenode_upgrade.get_upgrade_in_progress_marker()):
@@ -194,7 +199,7 @@ def namenode(action=None, hdfs_binary=None, do_format=True, 
upgrade_type=None,
 
     # During an Express Upgrade, NameNode will not leave SafeMode until the 
DataNodes are started,
     # so always disable the Safemode check
-    if upgrade_type == "nonrolling":
+    if upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
       ensure_safemode_off = False
 
     # some informative logging separate from the above logic to keep things a 
little cleaner
@@ -561,4 +566,4 @@ def is_this_namenode_active():
  # at this point, this NameNode is neither active nor standby - we must 
wait to ensure it
  # enters at least one of these roles before returning a verdict - the 
annotation will catch
  # this failure and retry the function automatically
-  raise Fail(format("The NameNode {namenode_id} is not listed as Active or 
Standby, waiting..."))
\ No newline at end of file
+  raise Fail(format("The NameNode {namenode_id} is not listed as Active or 
Standby, waiting..."))

http://git-wip-us.apache.org/repos/asf/ambari/blob/21d4aff0/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 86f68e5..bd05a95 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -19,20 +19,19 @@ limitations under the License.
 
 import sys
 import os
-import time
 import json
 import tempfile
 from datetime import datetime
 import ambari_simplejson as json # simplejson is much faster comparing to 
Python 2.6 json module and has the same functions set.
 
+from ambari_commons import constants
+
 from resource_management.libraries.script.script import Script
 from resource_management.core.resources.system import Execute, File
 from resource_management.core import shell
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import Direction
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import 
check_stack_feature
+from resource_management.libraries.functions.constants import Direction
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.security_commons import 
build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, 
validate_security_config_properties, \
@@ -100,8 +99,9 @@ class NameNode(Script):
     namenode(action="start", hdfs_binary=hdfs_binary, 
upgrade_type=upgrade_type,
       upgrade_suspended=params.upgrade_suspended, env=env)
 
-    # after starting NN in an upgrade, touch the marker file
-    if upgrade_type is not None:
+    # after starting NN in an upgrade, touch the marker file - but only do 
this for certain
+    # upgrade types - not all upgrades actually tell NN about the upgrade 
(like HOU)
+    if upgrade_type in (constants.UPGRADE_TYPE_ROLLING, 
constants.UPGRADE_TYPE_NON_ROLLING):
      # place a file on the system indicating that we've submitted the 
command that
       # instructs NN that it is now part of an upgrade
       namenode_upgrade.create_upgrade_marker()
@@ -110,7 +110,7 @@ class NameNode(Script):
     import params
     env.set_params(params)
     hdfs_binary = self.get_hdfs_binary()
-    if upgrade_type == "rolling" and params.dfs_ha_enabled:
+    if upgrade_type == constants.UPGRADE_TYPE_ROLLING and 
params.dfs_ha_enabled:
       if params.dfs_ha_automatic_failover_enabled:
         initiate_safe_zkfc_failover()
       else:
@@ -184,25 +184,23 @@ class NameNodeDefault(NameNode):
 
   def finalize_non_rolling_upgrade(self, env):
     hfds_binary = self.get_hdfs_binary()
-    namenode_upgrade.finalize_upgrade("nonrolling", hfds_binary)
+    namenode_upgrade.finalize_upgrade(constants.UPGRADE_TYPE_NON_ROLLING, 
hfds_binary)
 
   def finalize_rolling_upgrade(self, env):
     hfds_binary = self.get_hdfs_binary()
-    namenode_upgrade.finalize_upgrade("rolling", hfds_binary)
+    namenode_upgrade.finalize_upgrade(constants.UPGRADE_TYPE_ROLLING, 
hfds_binary)
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, 
params.version):
-      # When downgrading an Express Upgrade, the first thing we do is to 
revert the symlinks.
-      # Therefore, we cannot call this code in that scenario.
-      call_if = [("rolling", "upgrade"), ("rolling", "downgrade"), 
("nonrolling", "upgrade")]
-      for e in call_if:
-        if (upgrade_type, params.upgrade_direction) == e:
-          conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-hdfs-namenode", params.version)
+    # When downgrading an Express Upgrade, the first thing we do is to revert 
the symlinks.
+    # Therefore, we cannot call this code in that scenario.
+    if upgrade_type != constants.UPGRADE_TYPE_NON_ROLLING or 
params.upgrade_direction != Direction.DOWNGRADE:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+
+    stack_select.select("hadoop-hdfs-namenode", params.version)
 
   def post_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade post-restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/21d4aff0/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
 
b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
index 4ac56fc..2c0d691 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/hdfs_namenode.py
@@ -19,6 +19,8 @@ limitations under the License.
 import os.path
 import time
 
+from ambari_commons import constants
+
 from resource_management.core import shell
 from resource_management.core.source import Template
 from resource_management.core.resources.system import File, Execute, Directory
@@ -119,18 +121,18 @@ def namenode(action=None, hdfs_binary=None, 
do_format=True, upgrade_type=None,
         if not success:
           raise Fail("Could not bootstrap standby namenode")
 
-    if upgrade_type == "rolling" and params.dfs_ha_enabled:
+    if upgrade_type == constants.UPGRADE_TYPE_ROLLING and 
params.dfs_ha_enabled:
       # Most likely, ZKFC is up since RU will initiate the failover command. 
However, if that failed, it would have tried
       # to kill ZKFC manually, so we need to start it if not already running.
       safe_zkfc_op(action, env)
 
     options = ""
-    if upgrade_type == "rolling":
+    if upgrade_type == constants.UPGRADE_TYPE_ROLLING:
       if params.upgrade_direction == Direction.UPGRADE:
         options = "-rollingUpgrade started"
       elif params.upgrade_direction == Direction.DOWNGRADE:
         options = "-rollingUpgrade downgrade"
-    elif upgrade_type == "nonrolling":
+    elif upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
       is_previous_image_dir = is_previous_fs_image()
       Logger.info("Previous file system image dir present is 
{0}".format(str(is_previous_image_dir)))
 
@@ -138,6 +140,9 @@ def namenode(action=None, hdfs_binary=None, do_format=True, 
upgrade_type=None,
         options = "-rollingUpgrade started"
       elif params.upgrade_direction == Direction.DOWNGRADE:
         options = "-rollingUpgrade downgrade"
+    elif upgrade_type == constants.UPGRADE_TYPE_HOST_ORDERED:
+      # nothing special to do for HOU - should be very close to a normal 
restart
+      pass
     elif upgrade_type is None and upgrade_suspended is True:
       # the rollingUpgrade flag must be passed in during a suspended upgrade 
when starting NN
       if os.path.exists(namenode_upgrade.get_upgrade_in_progress_marker()):
@@ -193,7 +198,7 @@ def namenode(action=None, hdfs_binary=None, do_format=True, 
upgrade_type=None,
 
     # During an Express Upgrade, NameNode will not leave SafeMode until the 
DataNodes are started,
     # so always disable the Safemode check
-    if upgrade_type == "nonrolling":
+    if upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
       ensure_safemode_off = False
 
     # some informative logging separate from the above logic to keep things a 
little cleaner
@@ -221,7 +226,7 @@ def namenode(action=None, hdfs_binary=None, do_format=True, 
upgrade_type=None,
   elif action == "stop":
     import params
     service(
-      action="stop", name="namenode", 
+      action="stop", name="namenode",
       user=params.hdfs_user
     )
   elif action == "status":
@@ -288,7 +293,7 @@ def create_hdfs_directories():
                        owner=params.smoke_user,
                        mode=params.smoke_hdfs_user_mode,
   )
-  params.HdfsResource(None, 
+  params.HdfsResource(None,
                       action="execute",
   )
 
@@ -355,15 +360,15 @@ def is_namenode_formatted(params):
     if os.path.isdir(mark_dir):
       marked = True
       Logger.info(format("{mark_dir} exists. Namenode DFS already formatted"))
-    
+
   # Ensure that all mark dirs created for all name directories
   if marked:
     for mark_dir in mark_dirs:
       Directory(mark_dir,
         create_parents = True
-      )      
-    return marked  
-  
+      )
+    return marked
+
   # Move all old format markers to new place
   for old_mark_dir in old_mark_dirs:
     if os.path.isdir(old_mark_dir):
@@ -374,7 +379,7 @@ def is_namenode_formatted(params):
         marked = True
       Directory(old_mark_dir,
         action = "delete"
-      )    
+      )
     elif os.path.isfile(old_mark_dir):
       for mark_dir in mark_dirs:
         Directory(mark_dir,
@@ -384,7 +389,7 @@ def is_namenode_formatted(params):
         action = "delete"
       )
       marked = True
-      
+
   if marked:
     return True
 
@@ -403,7 +408,7 @@ def is_namenode_formatted(params):
     except Fail:
       Logger.info(format("NameNode will not be formatted since {name_dir} 
exists and contains content"))
       return True
-       
+
   return False
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -414,13 +419,13 @@ def decommission():
   conf_dir = params.hadoop_conf_dir
   user_group = params.user_group
   nn_kinit_cmd = params.nn_kinit_cmd
-  
+
   File(params.exclude_file_path,
        content=Template("exclude_hosts_list.j2"),
        owner=hdfs_user,
        group=user_group
   )
-  
+
   if not params.update_exclude_file_only:
     Execute(nn_kinit_cmd,
             user=hdfs_user
@@ -560,4 +565,4 @@ def is_this_namenode_active():
  # at this point, this NameNode is neither active nor standby - we must 
wait to ensure it
  # enters at least one of these roles before returning a verdict - the 
annotation will catch
  # this failure and retry the function automatically
-  raise Fail(format("The NameNode {namenode_id} is not listed as Active or 
Standby, waiting..."))
\ No newline at end of file
+  raise Fail(format("The NameNode {namenode_id} is not listed as Active or 
Standby, waiting..."))

http://git-wip-us.apache.org/repos/asf/ambari/blob/21d4aff0/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
 
b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
index 86f68e5..08578bc 100644
--- 
a/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
+++ 
b/ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/namenode.py
@@ -19,20 +19,19 @@ limitations under the License.
 
 import sys
 import os
-import time
 import json
 import tempfile
 from datetime import datetime
 import ambari_simplejson as json # simplejson is much faster comparing to 
Python 2.6 json module and has the same functions set.
 
+from ambari_commons import constants
+
 from resource_management.libraries.script.script import Script
 from resource_management.core.resources.system import Execute, File
 from resource_management.core import shell
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import Direction
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import 
check_stack_feature
+from resource_management.libraries.functions.constants import Direction
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.security_commons import 
build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, 
validate_security_config_properties, \
@@ -100,8 +99,9 @@ class NameNode(Script):
     namenode(action="start", hdfs_binary=hdfs_binary, 
upgrade_type=upgrade_type,
       upgrade_suspended=params.upgrade_suspended, env=env)
 
-    # after starting NN in an upgrade, touch the marker file
-    if upgrade_type is not None:
+    # after starting NN in an upgrade, touch the marker file - but only do 
this for certain
+    # upgrade types - not all upgrades actually tell NN about the upgrade 
(like HOU)
+    if upgrade_type in (constants.UPGRADE_TYPE_ROLLING, 
constants.UPGRADE_TYPE_NON_ROLLING):
      # place a file on the system indicating that we've submitted the 
command that
       # instructs NN that it is now part of an upgrade
       namenode_upgrade.create_upgrade_marker()
@@ -110,7 +110,7 @@ class NameNode(Script):
     import params
     env.set_params(params)
     hdfs_binary = self.get_hdfs_binary()
-    if upgrade_type == "rolling" and params.dfs_ha_enabled:
+    if upgrade_type == constants.UPGRADE_TYPE_ROLLING and 
params.dfs_ha_enabled:
       if params.dfs_ha_automatic_failover_enabled:
         initiate_safe_zkfc_failover()
       else:
@@ -184,25 +184,23 @@ class NameNodeDefault(NameNode):
 
   def finalize_non_rolling_upgrade(self, env):
     hfds_binary = self.get_hdfs_binary()
-    namenode_upgrade.finalize_upgrade("nonrolling", hfds_binary)
+    namenode_upgrade.finalize_upgrade(constants.UPGRADE_TYPE_NON_ROLLING, 
hfds_binary)
 
   def finalize_rolling_upgrade(self, env):
     hfds_binary = self.get_hdfs_binary()
-    namenode_upgrade.finalize_upgrade("rolling", hfds_binary)
+    namenode_upgrade.finalize_upgrade(constants.UPGRADE_TYPE_ROLLING, 
hfds_binary)
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
-    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, 
params.version):
-      # When downgrading an Express Upgrade, the first thing we do is to 
revert the symlinks.
-      # Therefore, we cannot call this code in that scenario.
-      call_if = [("rolling", "upgrade"), ("rolling", "downgrade"), 
("nonrolling", "upgrade")]
-      for e in call_if:
-        if (upgrade_type, params.upgrade_direction) == e:
-          conf_select.select(params.stack_name, "hadoop", params.version)
-      stack_select.select("hadoop-hdfs-namenode", params.version)
+    # When downgrading an Express Upgrade, the first thing we do is to revert 
the symlinks.
+    # Therefore, we cannot call this code in that scenario.
+    if upgrade_type != constants.UPGRADE_TYPE_NON_ROLLING or 
params.upgrade_direction != Direction.DOWNGRADE:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+
+    stack_select.select("hadoop-hdfs-namenode", params.version)
 
   def post_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade post-restart")
@@ -353,11 +351,11 @@ class NameNodeDefault(NameNode):
       File(ccache_file_path,
            action = "delete",
       )
-      
+
   def get_log_folder(self):
     import params
     return params.hdfs_log_dir
-  
+
   def get_user(self):
     import params
     return params.hdfs_user

http://git-wip-us.apache.org/repos/asf/ambari/blob/21d4aff0/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/host-ordered-upgrade.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/host-ordered-upgrade.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/host-ordered-upgrade.xml
index f6480bf..11c59e9 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/host-ordered-upgrade.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/host-ordered-upgrade.xml
@@ -34,12 +34,6 @@
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
       <direction>UPGRADE</direction>
-      <execute-stage service="HDFS" component="NAMENODE" title="Pre Upgrade 
HDFS">
-        <task xsi:type="execute" hosts="master">
-          <script>scripts/namenode.py</script>
-          <function>prepare_rolling_upgrade</function>
-        </task>
-      </execute-stage>
 
       <execute-stage service="TEZ" component="TEZ_CLIENT" title="Check Tez 
Tarball">
         <task xsi:type="execute" hosts="any">
@@ -113,13 +107,6 @@
         </task>
       </execute-stage>
 
-      <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS 
Finalize">
-        <task xsi:type="execute" hosts="master">
-          <script>scripts/namenode.py</script>
-          <function>finalize_rolling_upgrade</function>
-        </task>
-      </execute-stage>
-
       <execute-stage title="Save Cluster State">
         <task xsi:type="server_action" 
class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
         </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/21d4aff0/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/host-ordered-upgrade.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/host-ordered-upgrade.xml
 
b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/host-ordered-upgrade.xml
index 1b29af3..e7aace9 100644
--- 
a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/host-ordered-upgrade.xml
+++ 
b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/host-ordered-upgrade.xml
@@ -34,12 +34,6 @@
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
       <direction>UPGRADE</direction>
-      <execute-stage service="HDFS" component="NAMENODE" title="Pre Upgrade 
HDFS">
-        <task xsi:type="execute" hosts="master">
-          <script>scripts/namenode.py</script>
-          <function>prepare_rolling_upgrade</function>
-        </task>
-      </execute-stage>
 
       <execute-stage service="TEZ" component="TEZ_CLIENT" title="Check Tez 
Tarball">
         <task xsi:type="execute" hosts="any">
@@ -113,13 +107,6 @@
         </task>
       </execute-stage>
 
-      <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS 
Finalize">
-        <task xsi:type="execute" hosts="master">
-          <script>scripts/namenode.py</script>
-          <function>finalize_rolling_upgrade</function>
-        </task>
-      </execute-stage>
-
       <execute-stage title="Save Cluster State">
         <task xsi:type="server_action" 
class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
         </task>

http://git-wip-us.apache.org/repos/asf/ambari/blob/21d4aff0/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py 
b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index fc043b8..f76a7d7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -1512,8 +1512,10 @@ class TestNamenode(RMFTestCase):
                        config_dict = json_content,
                        stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
-                       call_mocks = [(0, None), (0, None)],
+                       call_mocks = [(0, None, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
+
+    self.assertResourceCalled('Link', '/etc/hadoop/conf', 
to='/usr/hdp/current/hadoop-client/conf')
     self.assertResourceCalled('Execute', ('ambari-python-wrap', 
'/usr/bin/hdp-select', 'set', 'hadoop-hdfs-namenode', version), sudo=True)
     self.assertNoMoreResources()
 
@@ -1746,8 +1748,12 @@ class TestNamenode(RMFTestCase):
                        config_dict = json_content,
                        stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
-                       call_mocks = itertools.cycle([(0, None)]),
+                       call_mocks = itertools.cycle([(0, None, None)]),
                        mocks_dict = mocks_dict)
+
+    self.assertResourceCalled('Link', '/etc/hadoop/conf',
+      to = '/usr/hdp/current/hadoop-client/conf')
+
     import sys
     self.assertEquals("/usr/hdp/2.3.0.0-1234/hadoop/conf", 
sys.modules["params"].hadoop_conf_dir)
     self.assertEquals("/usr/hdp/2.3.0.0-1234/hadoop/libexec", 
sys.modules["params"].hadoop_libexec_dir)

Reply via email to