AMBARI-21432 - Allow Services To Be Stopped During an EU Between Stack Vendors (jonathanhurley)

Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cea08809
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cea08809
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cea08809

Branch: refs/heads/branch-2.5
Commit: cea08809ca6144066fb1961873a23061f60bcfd1
Parents: f3bcd4b
Author: Jonathan Hurley <[email protected]>
Authored: Sun Jul 9 18:18:22 2017 -0400
Committer: Jonathan Hurley <[email protected]>
Committed: Mon Jul 10 20:53:23 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/conf_select.py          | 57 +++++---------
 .../server/state/cluster/ClusterImpl.java       |  5 +-
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  |  8 +--
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  |  8 +--
 .../2.0.6/HBASE/test_phoenix_queryserver.py     | 23 --------
 .../stacks/2.0.6/YARN/test_historyserver.py     | 21 +-------
 6 files changed, 23 insertions(+), 99 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/cea08809/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index facf186..4f11633 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -26,7 +26,6 @@ import os
 import subprocess
 
 # Local Imports
-import version
 import stack_select
 from resource_management.core import shell
 from resource_management.libraries.functions.format import format
@@ -400,7 +399,6 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
   stack_root = Script.get_stack_root()
   stack_version = Script.get_stack_version()
   version = None
-  allow_setting_conf_select_symlink = False
 
   if not Script.in_stack_upgrade():
     # During normal operation, the HDP stack must be 2.3 or higher
@@ -414,27 +412,10 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
     if not os.path.islink(hadoop_conf_dir) and stack_name and version:
       version = str(version)
-      allow_setting_conf_select_symlink = True
   else:
-    # During an upgrade/downgrade, which can be a Rolling or Express Upgrade, need to calculate it based on the version
-    '''
-    Whenever upgrading to HDP 2.2, or downgrading back to 2.2, need to use /etc/hadoop/conf
-    Whenever upgrading to HDP 2.3, or downgrading back to 2.3, need to use a versioned hadoop conf dir
-
-    Type__|_Source_|_Target_|_Direction_____________|_Comment_____________________________________________________________
-    Normal|        | 2.2    |                       | Use /etc/hadoop/conf
-    Normal|        | 2.3    |                       | Use /etc/hadoop/conf, which should be a symlink to <stack-root>/current/hadoop-client/conf
-    EU    | 2.1    | 2.3    | Upgrade               | Use versioned <stack-root>/current/hadoop-client/conf
-          |        |        | No Downgrade Allowed  | Invalid
-    EU/RU | 2.2    | 2.2.*  | Any                   | Use <stack-root>/current/hadoop-client/conf
-    EU/RU | 2.2    | 2.3    | Upgrade               | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
-          |        |        | Downgrade             | Use <stack-root>/current/hadoop-client/conf
-    EU/RU | 2.3    | 2.3.*  | Any                   | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
-    '''
-
     # The "stack_version" is the desired stack, e.g., 2.2 or 2.3
     # In an RU, it is always the desired stack, and doesn't change even during the Downgrade!
-    # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is
+    # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is
     # rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
     if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
       hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
@@ -443,13 +424,16 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
       # is the version upgrading/downgrading to.
       stack_info = stack_select._get_upgrade_stack()
 
-      if stack_info is not None:
-        stack_name = stack_info[0]
-        version = stack_info[1]
-      else:
-        raise Fail("Unable to get parameter 'version'")
-
-      Logger.info("In the middle of a stack upgrade/downgrade for Stack {0} and destination version {1}, determining which hadoop conf dir to use.".format(stack_name, version))
+      if stack_info is None:
+        raise Fail("Unable to retrieve the upgrade/downgrade stack information from the request")
+
+      stack_name = stack_info[0]
+      version = stack_info[1]
+
+      Logger.info(
+        "An upgrade/downgrade for {0}-{1} is in progress, determining which hadoop conf dir to use.".format(
+          stack_name, version))
+
       # This is the version either upgrading or downgrading to.
       if version and check_stack_feature(StackFeature.CONFIG_VERSIONING, version):
         # Determine if <stack-selector-tool> has been run and if not, then use the current
@@ -466,21 +450,6 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
           hadoop_conf_dir = os.path.join(stack_root, version, "hadoop", "conf")
           Logger.info("Hadoop conf dir: {0}".format(hadoop_conf_dir))
-          allow_setting_conf_select_symlink = True
-
-  if allow_setting_conf_select_symlink:
-    # If not in the middle of an upgrade and on HDP 2.3 or higher, or if
-    # upgrading stack to version 2.3.0.0 or higher (which may be upgrade or downgrade), then consider setting the
-    # symlink for /etc/hadoop/conf.
-    # If a host does not have any HDFS or YARN components (e.g., only ZK), then it will not contain /etc/hadoop/conf
-    # Therefore, any calls to <conf-selector-tool> will fail.
-    # For that reason, if the hadoop conf directory exists, then make sure it is set.
-    if os.path.exists(hadoop_conf_dir):
-      conf_selector_name = stack_tools.get_stack_tool_name(stack_tools.CONF_SELECTOR_NAME)
-      Logger.info("The hadoop conf dir {0} exists, will call {1} on it for version {2}".format(
-        hadoop_conf_dir, conf_selector_name, version))
-      select(stack_name, "hadoop", version)
-
   Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))
   return hadoop_conf_dir
@@ -588,7 +557,7 @@ def convert_conf_directories_to_symlinks(package, version, dirs, skip_existing_l
 
   # <stack-root>/current/[component] is already set to to the correct version, e.g., <stack-root>/[version]/[component]
-
+
   select(stack_name, package, version, ignore_errors = True)
 
   # Symlink /etc/[component]/conf to /etc/[component]/conf.backup
@@ -703,4 +672,4 @@ def _get_backup_conf_directory(old_conf):
   """
   old_parent = os.path.abspath(os.path.join(old_conf, os.pardir))
   backup_dir = os.path.join(old_parent, "conf.backup")
-  return backup_dir
+  return backup_dir
\ No newline at end of file
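
For context, a minimal standalone Python sketch of the guard-clause flow that get_hadoop_conf_dir() now follows during an upgrade/downgrade: fail fast when the upgrade stack cannot be determined, otherwise unpack and log it. The helper and stub names below are illustrative only; the Fail message and the stack_select._get_upgrade_stack() lookup come from the diff above.

    # Illustrative sketch, not the committed code (which lives in conf_select.get_hadoop_conf_dir()).
    class Fail(Exception):
        """Stand-in for resource_management.core.exceptions.Fail."""

    def resolve_upgrade_stack(get_upgrade_stack):
        # get_upgrade_stack mirrors stack_select._get_upgrade_stack(), which returns
        # (stack_name, version) or None when no upgrade/downgrade is in progress.
        stack_info = get_upgrade_stack()
        if stack_info is None:
            raise Fail("Unable to retrieve the upgrade/downgrade stack information from the request")
        stack_name, version = stack_info
        print("An upgrade/downgrade for {0}-{1} is in progress.".format(stack_name, version))
        return stack_name, version

    if __name__ == "__main__":
        print(resolve_upgrade_stack(lambda: ("HDP", "2.6.0.0-1234")))
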
http://git-wip-us.apache.org/repos/asf/ambari/blob/cea08809/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index e0bd49c..89d529c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -1065,9 +1065,12 @@ public class ClusterImpl implements Cluster {
         }
       }
 
+      if (null != effectiveClusterVersion) {
+        effectiveClusterVersionId = effectiveClusterVersion.getId();
+      }
+
       // cache for later use, but only if the action is completed
       if (null != effectiveClusterVersion && updateCache) {
-        effectiveClusterVersionId = effectiveClusterVersion.getId();
         upgradeEffectiveVersionCache.put(upgradeId, effectiveClusterVersionId);
       }
     }
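
The ClusterImpl hunk hoists the id assignment out of the cache-update branch, so the effective version id is populated whenever a version is resolved, and the cache is only written when updateCache is true. A rough Python sketch of that "always assign, cache only on request" pattern (names assumed, not Ambari APIs):

    # Sketch of the pattern from the Java hunk above; lookup_version is a placeholder.
    _effective_version_cache = {}

    def get_effective_version_id(upgrade_id, lookup_version, update_cache=True):
        effective_version_id = None
        effective_version = lookup_version(upgrade_id)
        if effective_version is not None:
            # assigned regardless of whether the result will be cached
            effective_version_id = effective_version["id"]
        if effective_version is not None and update_cache:
            # cache for later use, but only if the caller asked for it
            _effective_version_cache[upgrade_id] = effective_version_id
        return effective_version_id
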
http://git-wip-us.apache.org/repos/asf/ambari/blob/cea08809/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index e9c8541..a96ede9 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -37,12 +37,10 @@
   </prerequisite-checks>
 
   <order>
-    <!-- NOT SUPPORTED AT THIS TIME
     <group xsi:type="stop" name="STOP_HIGH_LEVEL_SERVICE_COMPONENTS" title="Stop Components for High-Level Services">
-      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <service-check>false</service-check>
       <parallel-scheduler/>
 
       <service name="FLUME">
@@ -122,9 +120,8 @@
     </group>
 
     <group xsi:type="stop" name="STOP_LOW_LEVEL_SERVICE_COMPONENTS" title="Stop Components for Core Services">
-      <direction>UPGRADE</direction>
-      <skippable>true</skippable>
       <service-check>false</service-check>
+      <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
       <parallel-scheduler/>
 
@@ -147,7 +144,6 @@
         <component>ZOOKEEPER_SERVER</component>
       </service>
     </group>
-    -->
 
     <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->
     <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" title="Update Target Stack">
http://git-wip-us.apache.org/repos/asf/ambari/blob/cea08809/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index e9c8541..a96ede9 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -37,12 +37,10 @@
   </prerequisite-checks>
 
   <order>
-    <!-- NOT SUPPORTED AT THIS TIME
     <group xsi:type="stop" name="STOP_HIGH_LEVEL_SERVICE_COMPONENTS" title="Stop Components for High-Level Services">
-      <direction>UPGRADE</direction>
+      <service-check>false</service-check>
       <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
-      <service-check>false</service-check>
       <parallel-scheduler/>
 
       <service name="FLUME">
@@ -122,9 +120,8 @@
     </group>
 
     <group xsi:type="stop" name="STOP_LOW_LEVEL_SERVICE_COMPONENTS" title="Stop Components for Core Services">
-      <direction>UPGRADE</direction>
-      <skippable>true</skippable>
       <service-check>false</service-check>
+      <skippable>true</skippable>
       <supports-auto-skip-failure>false</supports-auto-skip-failure>
       <parallel-scheduler/>
 
@@ -147,7 +144,6 @@
         <component>ZOOKEEPER_SERVER</component>
       </service>
     </group>
-    -->
 
     <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->
     <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" title="Update Target Stack">
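
Both BigInsights upgrade packs now run the stop groups unconditionally: the enclosing "NOT SUPPORTED AT THIS TIME" comment is gone and the <direction>UPGRADE</direction> restriction is removed, so components can be stopped during the cross-vendor express upgrade as the JIRA title describes. A small, hypothetical verification sketch (the file path and helper name are assumptions, not part of the commit) that asserts this shape against a local copy of a pack:

    # Hypothetical check of an upgrade pack's stop groups; illustrative only.
    import xml.etree.ElementTree as ET

    XSI_TYPE = "{http://www.w3.org/2001/XMLSchema-instance}type"

    def assert_stop_groups_enabled(pack_path):
        root = ET.parse(pack_path).getroot()
        stop_groups = [g for g in root.iter("group") if g.get(XSI_TYPE) == "stop"]
        assert stop_groups, "expected the stop groups to be present (not commented out)"
        for group in stop_groups:
            # no <direction> element means the group is not restricted to the upgrade direction
            assert group.find("direction") is None
            assert group.findtext("service-check") == "false"
            assert group.findtext("skippable") == "true"

    # assert_stop_groups_enabled("nonrolling-upgrade-to-hdp-2.6.xml")
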
http://git-wip-us.apache.org/repos/asf/ambari/blob/cea08809/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
index 60022e1..1b324d4 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
@@ -76,8 +76,6 @@ class TestPhoenixQueryServer(RMFTestCase):
       call_mocks = [(0, None, None)]
     )
 
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Execute',
       '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
       environment = {'JAVA_HOME':'/usr/jdk64/jdk1.8.0_40',
@@ -134,8 +132,6 @@ class TestPhoenixQueryServer(RMFTestCase):
       call_mocks = [(0, None, None)]
     )
 
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Execute',
       '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
       environment = {'JAVA_HOME':'/usr/jdk64/jdk1.8.0_40',
@@ -217,18 +213,7 @@ class TestPhoenixQueryServer(RMFTestCase):
 
     self.assertNoMoreResources()
 
-  def assert_call_to_get_hadoop_conf_dir(self):
-    # From call to conf_select.get_hadoop_conf_dir()
-    self.assertResourceCalled("Execute", ("cp", "-R", "-p", "/etc/hadoop/conf", "/etc/hadoop/conf.backup"),
-        not_if = "test -e /etc/hadoop/conf.backup",
-        sudo = True)
-    self.assertResourceCalled("Directory", "/etc/hadoop/conf",
-        action = ["delete"])
-    self.assertResourceCalled("Link", "/etc/hadoop/conf", to="/etc/hadoop/conf.backup")
-
   def assert_configure_default(self):
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Directory', '/etc/hbase',
       mode = 0755
     )
@@ -330,8 +315,6 @@ class TestPhoenixQueryServer(RMFTestCase):
     )
 
   def assert_configure_secured(self):
-    self.assert_call_to_get_hadoop_conf_dir()
-
     self.assertResourceCalled('Directory', '/etc/hbase',
       mode = 0755
     )
@@ -459,10 +442,4 @@ class TestPhoenixQueryServer(RMFTestCase):
       cd_access = 'a',
     )
     self.assertResourceCalledIgnoreEarlier('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'phoenix-server', '2.3.0.0-1234'), sudo=True)
-
-    self.assertResourceCalled("Execute", ("cp", "-R", "-p", "/etc/hadoop/conf", "/etc/hadoop/conf.backup"),
-        not_if = "test -e /etc/hadoop/conf.backup",
-        sudo = True)
-    self.assertResourceCalled("Directory", "/etc/hadoop/conf", action = ["delete"])
-    self.assertResourceCalled("Link", "/etc/hadoop/conf", to="/etc/hadoop/conf.backup")
     self.assertNoMoreResources()
"/etc/hadoop/conf.backup"), - not_if = "test -e /etc/hadoop/conf.backup", - sudo = True) - self.assertResourceCalled("Directory", "/etc/hadoop/conf", - action = ["delete"]) - self.assertResourceCalled("Link", "/etc/hadoop/conf", to="/etc/hadoop/conf.backup") - @patch.object(functions, "get_stack_version", new = MagicMock(return_value="2.3.0.0-1234")) @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs") def test_pre_upgrade_restart_23(self, copy_to_hdfs_mock): @@ -786,8 +777,6 @@ class TestHistoryServer(RMFTestCase): self.assertTrue(call("slider", "hadoop", "hdfs", skip=False) in copy_to_hdfs_mock.call_args_list) # From call to conf_select.get_hadoop_conf_dir() - self.assert_call_to_get_hadoop_conf_dir() - self.assert_call_to_get_hadoop_conf_dir() self.assertResourceCalled('HdfsResource', None, immutable_paths = self.DEFAULT_IMMUTABLE_PATHS, @@ -803,11 +792,5 @@ class TestHistoryServer(RMFTestCase): self.assertNoMoreResources() - self.assertEquals(5, mocks_dict['call'].call_count) - self.assertEquals(5, mocks_dict['checked_call'].call_count) - self.assertEquals( - ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'), - mocks_dict['checked_call'].call_args_list[0][0][0]) - self.assertEquals( - ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'), - mocks_dict['call'].call_args_list[0][0][0]) + self.assertEquals(1, mocks_dict['call'].call_count) + self.assertEquals(1, mocks_dict['checked_call'].call_count)
