http://git-wip-us.apache.org/repos/asf/ambari/blob/1427726a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.5.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.5.xml new file mode 100644 index 0000000..dc668a7 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.5.xml @@ -0,0 +1,1613 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+--> + + +<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> + <target>2.5.*.*</target> + <target-stack>HDP-2.5</target-stack> + <type>NON_ROLLING</type> + <prerequisite-checks> + <!-- List of additional pre-req checks to run in addition to the required pre-reqs --> + <check>org.apache.ambari.server.checks.KafkaKerberosCheck</check> + + <configuration> + <!-- Configuration properties for all pre-reqs including required pre-reqs --> + <check-properties name="org.apache.ambari.server.checks.HiveDynamicServiceDiscoveryCheck"> + <property name="min-failure-stack-version">HDP-2.3.0.0</property> + </check-properties> + </configuration> + </prerequisite-checks> + + <!-- Instructs the upgrade pack how to build the configuration pack --> + <upgrade-path> + <intermediate-stack version="2.3"/> + <intermediate-stack version="2.4"/> + </upgrade-path> + + <order> + <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade"> + <direction>UPGRADE</direction> + + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + + <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues"> + <task xsi:type="manual"> + <message>Before continuing, please stop all YARN queues. If yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to true, then you can skip this step since the clients will retry on their own.</message> + </task> + </execute-stage> + + <execute-stage service="SLIDER" component="SLIDER" title="Stop Long Running Applications on Slider"> + <task xsi:type="manual"> + <message>Before continuing, please stop all long-running applications deployed using Slider. 
E.g., su - yarn "/usr/hdp/current/slider-client/bin/slider stop &lt;app_name&gt;"</message> + </task> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Stop Storm Topologies"> + <task xsi:type="manual"> + <message>Before continuing, please deactivate and kill any currently running topologies.</message> + </task> + </execute-stage> + + <!-- Large Cluster Hack: May need to upload new tez tarball on your own to HDFS since all services are stopped. --> + <!-- + <execute-stage service="TEZ" component="TEZ_CLIENT" title="Check Tez Tarball"> + <task xsi:type="execute" hosts="any"> + <script>scripts/pre_upgrade.py</script> + <function>prepare</function> + </task> + </execute-stage> + --> + </group> + + <group xsi:type="stop" name="STOP_HIGH_LEVEL_SERVICE_COMPONENTS" title="Stop Components for High-Level Services"> + <!-- Large Cluster Hack: assume all components are already stopped. --> + <direction>DOWNGRADE</direction> + + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service-check>false</service-check> + + <service name="ATLAS"> + <component>ATLAS_SERVER</component> + </service> + + <service name="FLUME"> + <component>FLUME_HANDLER</component> + </service> + + <service name="ACCUMULO"> + <component>ACCUMULO_TRACER</component> + <component>ACCUMULO_GC</component> + <component>ACCUMULO_TSERVER</component> + <component>ACCUMULO_MONITOR</component> + <component>ACCUMULO_MASTER</component> + </service> + + <service name="STORM"> + <component>DRPC_SERVER</component> + <component>STORM_UI_SERVER</component> + <component>SUPERVISOR</component> + <component>NIMBUS</component> + </service> + + <service name="KNOX"> + <component>KNOX_GATEWAY</component> + </service> + + <service name="KAFKA"> + <component>KAFKA_BROKER</component> + </service> + + <service name="FALCON"> + 
<component>FALCON_SERVER</component> + </service> + + <service name="OOZIE"> + <component>OOZIE_SERVER</component> + </service> + + <service name="SPARK"> + <component>SPARK_JOBHISTORYSERVER</component> + <component>SPARK_THRIFTSERVER</component> + </service> + + <service name="HIVE"> + <component>WEBHCAT_SERVER</component> + <component>HIVE_SERVER</component> + <component>HIVE_METASTORE</component> + </service> + + <service name="YARN"> + <component>NODEMANAGER</component> + <component>RESOURCEMANAGER</component> + <component>APP_TIMELINE_SERVER</component> + </service> + + <service name="MAPREDUCE2"> + <component>HISTORYSERVER</component> + </service> + </group> + + <group xsi:type="cluster" name="Backups" title="Perform Backups"> + <direction>UPGRADE</direction> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + + <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup Oozie Database"> + <task xsi:type="manual"> + <message>Before continuing, please backup the Oozie Server database referenced by the Oozie server located on {{hosts.all}}.</message> + </task> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup Hive Metastore"> + <task xsi:type="manual"> + <message>Before continuing, please backup the Hive Metastore database referenced by the Hive Metastore service(s) located on the following host(s): {{hosts.all}}.</message> + </task> + </execute-stage> + + <!-- Large Cluster Hack: backup HBASE manually --> + <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBase"> + <task xsi:type="manual"> + <message>Before continuing, please ensure you have taken a snapshot of HBase while it was running. 
echo 'snapshot_all' | /usr/hdp/current/hbase-client/bin/hbase shell</message> + </task> + </execute-stage> + <!-- + <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE"> + <task xsi:type="execute" hosts="master"> + <script>scripts/hbase_upgrade.py</script> + <function>take_snapshot</function> + </task> + </execute-stage> + --> + + <!-- Large Cluster Hack: backup HDFS manually --> + <execute-stage service="HDFS" component="NAMENODE" title="Backup HDFS"> + <task xsi:type="manual"> + <message>Before continuing, please ensure you have backed up HDFS while it was running.</message> + <message>1. Enter safe mode.</message> + <message>2. As the HDFS user, /usr/hdp/current/hadoop-client/bin/hdfs dfsadmin -saveNamespace</message> + <message>3. Backup each NameNode Dir. E.g., cp -ar ${dfs.namenode.name.dir}/current $backup_location</message> + </task> + </execute-stage> + + <!-- + <execute-stage service="HDFS" component="NAMENODE" title="Prepare HDFS"> + <task xsi:type="execute" hosts="master"> + <script>scripts/namenode.py</script> + <function>prepare_express_upgrade</function> + </task> + </execute-stage> + --> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger Database"> + <task xsi:type="manual"> + <message>Before continuing, please backup the Ranger Admin database and Ranger Audit database on the following host(s): {{hosts.all}}. If audit database size is too large (greater than 3GB) then follow the below instructions: + 1. Backup the audit table from audit database. + 2. Truncate audit table. + 3. Follow upgrade process and once completed then restore audit data to audit table.</message> + </task> + </execute-stage> + </group> + + <group xsi:type="stop" name="STOP_LOW_LEVEL_SERVICE_COMPONENTS" title="Stop Components for Core Services"> + <!-- Large Cluster Hack: assume all components are already stopped. 
--> + <direction>DOWNGRADE</direction> + + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <service-check>false</service-check> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + + <service name="HBASE"> + <component>HBASE_REGIONSERVER</component> + <component>HBASE_MASTER</component> + <component>PHOENIX_QUERY_SERVER</component> + </service> + + <service name="KAFKA"> + <component>KAFKA_BROKER</component> + </service> + + <service name="HDFS"> + <component>DATANODE</component> + <component>NAMENODE</component> + <component>SECONDARY_NAMENODE</component> + <component>ZKFC</component> + <component>JOURNALNODE</component> + <component>NFS_GATEWAY</component> + </service> + + <service name="RANGER"> + <component>RANGER_USERSYNC</component> + <component>RANGER_ADMIN</component> + </service> + + <service name="ZOOKEEPER"> + <component>ZOOKEEPER_SERVER</component> + </service> + </group> + + <group xsi:type="cluster" name="Restore Backups" title="Restore Backups"> + <direction>DOWNGRADE</direction> + <skippable>true</skippable> + + <!-- If the user attempts a downgrade after this point, they will need to restore backups + before starting any of the services. 
--> + + <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Restore Oozie Database"> + <task xsi:type="manual"> + <message>Before continuing, please restore the Oozie Server database referenced by the Oozie server located on {{hosts.all}}.</message> + </task> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_METASTORE" title="Restore Hive Metastore"> + <task xsi:type="manual"> + <message>Before continuing, please restore the Hive Metastore database referenced by the Hive Metastore service(s) located on the following host(s): {{hosts.all}}.</message> + </task> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Restore Ranger Database"> + <task xsi:type="manual"> + <message>Before continuing, please restore the Ranger Admin database and Ranger Audit database on the following host(s): {{hosts.all}}.</message> + </task> + </execute-stage> + </group> + + <!-- + On an HDP 2.4 to 2.2 downgrade, the /etc/component/conf must be turned back from a symlink + into a normal directory. This will avoid the circular symbolic link that will happen after + hdp-select sets /usr/hdp/current/component back to 2.2: + + # new symlink created for 2.3 and higher + /etc/flume/conf -> /usr/hdp/current/flume-server/conf + + # circular reference from 2.2 (current) + /usr/hdp/2.2.0.0-1234/flume/conf -> /etc/flume/conf + /usr/hdp/current/flume-server/conf -> /etc/flume/conf + --> + <group xsi:type="cluster" name="RESTORE_CONFIG_DIRS" title="Restore Configuration Directories"> + <direction>DOWNGRADE</direction> + <execute-stage title="Restore configuration directories and remove HDP 2.3+ symlinks"> + <task xsi:type="execute"> + <script>scripts/ru_set_all.py</script> + <function>unlink_all_configs</function> + </task> + </execute-stage> + </group> + + <!-- After processing this group, will change the effective Stack of the UpgradeContext object. 
--> + <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" title="Update Target Stack"> + <execute-stage title="Update Target Stack" service="" component=""> + <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction"> + </task> + </execute-stage> + </group> + + <group xsi:type="cluster" name="Upgrade service configs" title="Upgrade service configs"> + <direction>UPGRADE</direction> <!-- prevent config changes on downgrade --> + <skippable>true</skippable> <!-- May fix configuration problems manually --> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + + <!-- RANGER --> + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger"> + <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_env"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger"> + <task xsi:type="configure" id="hdp_2_3_0_0_set_ranger_admin_ssl_flag"/> + </execute-stage> + + <!-- This task must be done before hdp_2_5_0_0_remove_audit_db_admin_properties and + before hdp_2_5_0_0_remove_audit_db_ranger_admin_site --> + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger"> + <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_admin"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Calculating Ranger Properties"> + <task xsi:type="server_action" summary="Calculating Ranger Properties" class="org.apache.ambari.server.serveraction.upgrades.RangerConfigCalculation" /> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger"> + <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_usersync"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger"> + <task xsi:type="configure" 
id="hdp_2_3_0_0_update_ranger_site"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger"> + <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_usersync_sync_source"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger"> + <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_usersync_properties"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger"> + <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_delete_oracle_home"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger"> + <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_admin_hdfs_audit"/> + </execute-stage> + + <!-- This is not needed since this property is removed in HDP 2.5 in hdp_2_5_0_0_remove_audit_db_flag --> + <!-- + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger"> + <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_admin_db_audit"/> + </execute-stage> + --> + + <!-- HDFS --> + <!-- Already taken care of by hdp_2_5_0_0_namenode_ha_adjustments --> + <!-- + <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for NameNode"> + <task xsi:type="configure" id="hdp_2_3_0_0_namenode_ha_adjustments"/> + </execute-stage> + --> + + <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for NameNode"> + <task xsi:type="configure" id="hdp_2_3_0_0_modify_hadoop_env"/> + </execute-stage> + + <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for NameNode"> + <task xsi:type="configure" id="hdp_2_3_0_0_hdfs_adjust_ranger_plugin"/> + </execute-stage> + + <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for NameNode"> + <task xsi:type="configure" 
id="hdp_2_3_0_0_hdfs_transition_ranger_hdfs_policy"/> + </execute-stage> + + <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for NameNode"> + <task xsi:type="configure" id="hdp_2_3_0_0_hdfs_transition_ranger_hdfs_security"/> + </execute-stage> + + <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for NameNode"> + <task xsi:type="configure" id="hdp_2_3_0_0_hdfs_ranger_hdfs_delete_old_properties"/> + </execute-stage> + + <!-- HDFS HDP 2.5 --> + <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for Hdfs Namenode HA"> + <task xsi:type="configure" id="hdp_2_5_0_0_namenode_ha_adjustments"/> + </execute-stage> + + <!-- Combine hdp_2_3_0_0_hdfs_transition_ranger_hdfs_audit and hdp_2_5_0_0_remove_ranger_hdfs_audit_db into + hdp_2_2_to_2_5_hdfs_remove_ranger_audit_db to transfer and delete properties. + --> + <execute-stage service="HDFS" component="NAMENODE" title="Remove Ranger HDFS Audit DB"> + <task xsi:type="configure" id="hdp_2_2_to_2_5_hdfs_remove_ranger_audit_db"/> + </execute-stage> + + <!-- YARN --> + <execute-stage service="MAPREDUCE2" component="HISTORYSERVER" title="Apply config changes for HistoryServer"> + <task xsi:type="configure" id="hdp_2_3_0_0_mapreduce2_adjust_history_server"/> + </execute-stage> + + <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client"> + <task xsi:type="server_action" summary="Verifying LZO codec path for mapreduce" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath"/> + </execute-stage> + + <execute-stage service="YARN" component="APP_TIMELINE_SERVER" title="Apply config changes for AppTimelineServer"> + <task xsi:type="configure" id="hdp_2_3_0_0_yarn_ats_enable_recovery"/> + </execute-stage> + + <execute-stage service="YARN" component="APP_TIMELINE_SERVER" title="Apply config changes for AppTimelineServer"> + <task xsi:type="configure" 
id="hdp_2_3_0_0_yarn_keep_ats_v1"/> + </execute-stage> + + <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for ResourceManager"> + <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_disable_node_labels"/> + </execute-stage> + + <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for ResourceManager"> + <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_clear_default_node_label_expression"/> + </execute-stage> + + <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for ResourceManager"> + <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_check_cs_root_def_capacity"/> + </execute-stage> + + <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for ResourceManager"> + <task xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_check_cs_root_max_capacity"/> + </execute-stage> + + <execute-stage service="YARN" component="RESOURCEMANAGER" title="Calculating Yarn Properties for Spark Shuffle"> + <task xsi:type="server_action" summary="Calculating Yarn Properties for Spark" class="org.apache.ambari.server.serveraction.upgrades.SparkShufflePropertyConfig" /> + </execute-stage> + + <!-- YARN 2.5 --> + <execute-stage service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Yarn Resourcemanager"> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_yarn_audit_db"/> + </execute-stage> + + <execute-stage service="YARN" component="NODEMANAGER" title="Add Spark2 shuffle"> + <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/> + </execute-stage> + + <!--HBASE--> + <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase Master"> + <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_adjust_phoenix_scheduler_factory"/> + </execute-stage> + + <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase Master"> + <task xsi:type="configure" 
id="hdp_2_3_0_0_hbase_master_adjust_phoenix_rpc_controller_factory"/> + </execute-stage> + + <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase Master"> + <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_set_global_memstore_size"/> + </execute-stage> + + <execute-stage service="HBASE" component="HBASE_MASTER" title="Calculating HBase Properties"> + <task xsi:type="server_action" summary="Calculating HBase Properties" class="org.apache.ambari.server.serveraction.upgrades.HBaseConfigCalculation" /> + </execute-stage> + + <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase Master"> + <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_adjust_phoenix_indexed_wal_edit_codec"/> + </execute-stage> + + <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase Master"> + <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_adjust_authorization_coprocessors"/> + </execute-stage> + + <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase Master"> + <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_transition_ranger_hbase_policy"/> + </execute-stage> + + <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase Master"> + <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_transition_ranger_hbase_audit"/> + </execute-stage> + + <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase Master"> + <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_copy_ranger_policies"/> + </execute-stage> + + <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase Master"> + <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_delete_old_ranger_properties"/> + </execute-stage> + + <!-- HBASE --> + <!-- These HBASE configs changed in HDP 2.3.4.0, but Ambari can't distinguish HDP 2.3.2.0 vs HDP 
2.3.4.0, so easier to always do them. --> + <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBASE Master"> + <task xsi:type="configure" id="hdp_2_3_4_0_hbase_remove_local_indexing"/> + </execute-stage> + + <!-- HBASE HDP 2.5 --> + <!-- Combine hdp_2_3_0_0_hbase_master_transition_ranger_hbase_audit and hdp_2_5_0_0_remove_ranger_hbase_audit_db into + hdp_2_2_to_2_5_remove_ranger_hbase_audit_db + --> + <execute-stage service="HBASE" component="HBASE_MASTER" title="Remove Ranger HBASE Audit DB"> + <task xsi:type="configure" id="hdp_2_2_to_2_5_remove_ranger_hbase_audit_db"/> + </execute-stage> + + <!--TEZ--> + <execute-stage service="TEZ" component="TEZ_CLIENT" title="Apply config changes for Tez"> + <task xsi:type="configure" id="hdp_2_3_0_0_tez_client_adjust_properties"/> + </execute-stage> + + <execute-stage service="TEZ" component="TEZ_CLIENT" title="Apply config changes for Tez"> + <task xsi:type="configure" id="hdp_2_2_0_0_tez_client_adjust_tez_counters_properties"/> + </execute-stage> + + <execute-stage service="TEZ" component="TEZ_CLIENT" title="Apply config changes for Tez"> + <task xsi:type="configure" id="hdp_2_3_0_0_tez_keep_ats_v1"/> + </execute-stage> + + <!-- Tez HDP 2.5 --> + <execute-stage service="TEZ" component="TEZ_CLIENT" title="Verify LZO codec path for Tez"> + <task xsi:type="server_action" summary="Verifying LZO codec path for Tez" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath"/> + </execute-stage> + + <!-- Only one stack version needs to update tez.lib.uris property. 
--> + <execute-stage service="TEZ" component="TEZ_CLIENT" title="Apply config changes for Tez"> + <task xsi:type="configure" id="hdp_2_5_0_0_tez_client_adjust_tez_lib_uris_property"/> + </execute-stage> + + <!--HIVE--> + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_replace_auth_manager"/> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_authentication"/> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_policy"/> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_security"/> + </execute-stage> + + <!-- Combine hdp_2_3_0_0_hive_server_configure_ranger_audit and hdp_2_5_0_0_remove_ranger_hive_audit_db + into hdp_2_2_to_2_5_remove_ranger_hive_audit_db + --> + <execute-stage service="HIVE" component="HIVE_SERVER" title="Remove Ranger Hive Audit DB"> + <task xsi:type="configure" id="hdp_2_2_to_2_5_remove_ranger_hive_audit_db"/> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_audit"/> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_remove_deprecated_ranger_properties"/> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <task xsi:type="configure" 
id="hdp_2_3_0_0_hive_server_remove_datastore_classname"/> + </execute-stage> + + <execute-stage service="HIVE" component="WEBHCAT_SERVER" title="Apply config changes for WebHCat Server"> + <task xsi:type="configure" id="hdp_2_3_0_0_webhcat_server_update_env"/> + </execute-stage> + + <!-- Only one of these is needed since a similar ID exists for HDP 2.3 --> + <execute-stage service="HIVE" component="WEBHCAT_SERVER" title="Apply config changes for WebHCat Server"> + <task xsi:type="configure" id="hdp_2_4_0_0_webhcat_server_update_configuration_paths"/> + </execute-stage> + + <!-- HIVE HDP 2.5 --> + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hive_audit_db"/> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <!-- Remove Atlas configs that were incorrectly added to hive-site instead of Atlas' application.properties. 
--> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_hive_atlas_configs"/> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <task xsi:type="server_action" summary="Update hive-env content" class="org.apache.ambari.server.serveraction.upgrades.HiveEnvClasspathAction"/> + </execute-stage> + + <!--OOZIE--> + <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server"> + <task xsi:type="configure" id="hdp_2_3_0_0_oozie_remove_redundant_configurations"/> + </execute-stage> + + <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server"> + <task xsi:type="configure" id="hdp_2_4_0_0_oozie_remove_service_classes" /> + </execute-stage> + + <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server"> + <task xsi:type="server_action" summary="Adjusting Oozie properties" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation"/> + </execute-stage> + + <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server"> + <task xsi:type="server_action" summary="Adjusting Oozie properties" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation"/> + <task xsi:type="server_action" summary="Fix oozie admin users" class="org.apache.ambari.server.serveraction.upgrades.FixOozieAdminUsers"/> + </execute-stage> + + <!-- KNOX --> + <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Apply config changes for Knox Gateway"> + <task xsi:type="configure" id="hdp_2_3_0_0_knox_configure_ranger_policy"/> + </execute-stage> + + <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Apply config changes for Knox Gateway"> + <task xsi:type="configure" id="hdp_2_3_0_0_knox_configure_ranger_knox_audit"/> + </execute-stage> + + <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Apply config 
changes for Knox Gateway"> + <task xsi:type="configure" id="hdp_2_3_0_0_knox_remove_deprecated_ranger_properties"/> + </execute-stage> + + <!-- KNOX HDP 2.5 --> + <!-- Combine hdp_2_3_0_0_knox_configure_ranger_knox_audit and hdp_2_5_0_0_remove_ranger_knox_audit_db + into hdp_2_2_to_2_5_remove_ranger_knox_audit_db + --> + <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Apply config changes for Knox Gateway"> + <task xsi:type="configure" id="hdp_2_2_to_2_5_remove_ranger_knox_audit_db"/> + </execute-stage> + + <!--FALCON--> + <execute-stage service="FALCON" component="FALCON_SERVER" title="Apply config changes for Falcon"> + <task xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property"/> + </execute-stage> + + <!-- RANGER --> + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin"> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_flag"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin"> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_admin_properties"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin"> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin"> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin"> + <task xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Calculating Ranger Properties"> + <task xsi:type="server_action" summary="Calculating Ranger Properties" 
class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation"/> + </execute-stage> + + <execute-stage service="RANGER" component="RANGER_ADMIN" title="Configuring Ranger Alerts"> + <task xsi:type="server_action" summary="Configuring Ranger Alerts" class="org.apache.ambari.server.serveraction.upgrades.RangerWebAlertConfigAction"/> + </execute-stage> + + <!-- SQOOP --> + <execute-stage service="SQOOP" component="SQOOP" title="Apply config changes for Sqoop to remove Atlas Configs"> + <!-- Remove Atlas configs that were incorrectly added to sqoop-site instead of Atlas' application.properties. --> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_sqoop_atlas_configs"/> + </execute-stage> + + <execute-stage service="SQOOP" component="SQOOP" title="Apply config changes for Sqoop if the cluster is Kerberized"> + <!-- If cluster is Kerberized, add configs to sqoop-atlas-application.properties, + which will be written to the local file system if Atlas is present. --> + <task xsi:type="configure" id="hdp_2_5_0_0_add_sqoop_atlas_security_configs" /> + </execute-stage> + + <!--STORM--> + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus"> + <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_monitor_freq_adjustment"/> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus"> + <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_convert_nimbus_host_to_seeds"/> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus"> + <task xsi:type="configure" id="hdp_2_3_0_0_update_storm_env"/> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus"> + <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_update_env_vars"/> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus"> + <task xsi:type="configure" 
id="hdp_2_3_0_0_nimbus_configure_ranger_policy"/> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus"> + <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_configure_ranger_audit"/> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus"> + <task xsi:type="configure" id="hdp_2_3_0_0_nimbus_remove_deprecated_ranger_properties"/> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus"> + <task xsi:type="configure" id="hdp_2_3_0_0_remove_empty_storm_topology_submission_notifier_plugin_class"/> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus"> + <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/> + </execute-stage> + + <!-- STORM HDP 2.5 --> + + <!-- Combine hdp_2_3_0_0_nimbus_configure_ranger_audit and hdp_2_5_0_0_remove_ranger_storm_audit_db into + hdp_2_2_to_2_5_remove_ranger_storm_audit_db + --> + <execute-stage service="STORM" component="NIMBUS" title="Remove Ranger Storm Audit DB"> + <task xsi:type="configure" id="hdp_2_2_to_2_5_remove_ranger_storm_audit_db"/> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus"> + <task xsi:type="configure" id="hdp_2_5_0_0_upgrade_storm_1.0"/> + </execute-stage> + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus in KerberosDescriptor"> + <task xsi:type="server_action" summary="Upgrade Storm Security Configs to 1.0" + class="org.apache.ambari.server.serveraction.upgrades.StormUpgradeKerberosDescriptorConfig"/> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm"> + <!-- Remove Atlas configs that were incorrectly added to storm-site instead of Atlas' application.properties. 
--> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_storm_atlas_configs"/> + <!-- Add nimbus.impersonation acls . --> + <task xsi:type="configure" id="hdp_2_5_0_0_add_storm_security_configs" /> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm"> + <task xsi:type="configure" id="hdp_2_4_0_0_remove_empty_storm_topology_submission_notifier_plugin_class"/> + </execute-stage> + + <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Nimbus"> + <task xsi:type="configure" id="increase_storm_zookeeper_timeouts"/> + </execute-stage> + + <!-- KAFKA --> + <execute-stage service="KAFKA" component="KAFKA_BROKER" title="Apply config changes for Kafka Broker"> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kafka_audit_db"/> + </execute-stage> + + <!-- SPARK --> + <execute-stage service="SPARK" component="SPARK_JOBHISTORYSERVER" title="Apply config changes for Spark JobHistoryServer"> + <task xsi:type="configure" id="hdp_2_5_0_0_spark_jobhistoryserver"/> + </execute-stage> + </group> + + <!-- + After processing this group, the user-specified Kerberos descriptor will be updated to work with + the new stack-level Kerberos descriptor. + --> + <group xsi:type="cluster" name="UPDATE_KERBEROS_DESCRIPTORS" title="Update Kerberos Descriptors"> + <execute-stage title="Update the user-specified Kerberos descriptor" service="" component=""> + <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.UpgradeUserKerberosDescriptor"/> + </execute-stage> + </group> + + <!-- + Invoke "hdp-select set all" to change any components we may have missed + that are installed on the hosts but not known by Ambari. 
+ --> + <group xsi:type="cluster" name="ALL_HOST_OPS" title="Set Version On All Hosts"> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + + <execute-stage title="Update stack to {{version}}"> + <!-- Large Cluster Hack: batch this command. --> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + + <task xsi:type="execute"> + <script>scripts/ru_set_all.py</script> + <function>actionexecute</function> + </task> + </execute-stage> + </group> + + <!-- Now, restart all of the services. --> + <group xsi:type="restart" name="ZOOKEEPER" title="ZooKeeper"> + <service-check>false</service-check> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="ZOOKEEPER"> + <service-check>true</service-check> + <component>ZOOKEEPER_SERVER</component> + <component>ZOOKEEPER_CLIENT</component> + </service> + </group> + + <group xsi:type="restart" name="RANGER" title="Ranger"> + <service-check>false</service-check> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="RANGER"> + <component>RANGER_ADMIN</component> + <component>RANGER_USERSYNC</component> + </service> + </group> + + <group xsi:type="restart" name="HDFS" title="HDFS"> + <service-check>false</service-check> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="HDFS"> + <component>JOURNALNODE</component> + <component>ZKFC</component> + <component>NAMENODE</component> + <component>SECONDARY_NAMENODE</component> + 
<component>NFS_GATEWAY</component> + <component>HDFS_CLIENT</component> + </service> + </group> + + <group xsi:type="restart" name="HDFS_DATANODES" title="HDFS DataNodes"> + <service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="HDFS"> + <component>DATANODE</component> + </service> + </group> + + <group xsi:type="cluster" name="HDFS_LEAVE_SAFEMODE" title="HDFS - Wait to leave Safemode"> + <service-check>false</service-check> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + + <execute-stage service="HDFS" component="NAMENODE" title="Wait to leave Safemode"> + <task xsi:type="execute" hosts="all" summary="Wait for NameNode to leave Safemode"> + <script>scripts/namenode.py</script> + <function>wait_for_safemode_off</function> + </task> + </execute-stage> + </group> + + <group xsi:type="restart" name="KAFKA" title="Kafka"> + <service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="KAFKA"> + <component>KAFKA_BROKER</component> + </service> + </group> + + <group xsi:type="restart" name="YARN_AND_MAPR" title="YARN and MapReduce2"> + <service-check>false</service-check> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + + <service name="MAPREDUCE2"> + <component>HISTORYSERVER</component> + <component>MAPREDUCE2_CLIENT</component> + </service> + + <service name="YARN"> + <component>APP_TIMELINE_SERVER</component> + <component>RESOURCEMANAGER</component> + <component>YARN_CLIENT</component> + </service> + </group> + + <group xsi:type="restart" name="YARN_NODEMANAGERS" title="YARN NodeManagers"> + 
<service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + + <service name="YARN"> + <component>NODEMANAGER</component> + </service> + </group> + + <group xsi:type="restart" name="HBASE" title="HBASE"> + <service-check>false</service-check> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="HBASE"> + <component>HBASE_MASTER</component> + <component>HBASE_REGIONSERVER</component> + <component>HBASE_CLIENT</component> + <component>PHOENIX_QUERY_SERVER</component> + </service> + </group> + + <group xsi:type="restart" name="CLIENTS" title="Tez, Pig, Sqoop Clients"> + <service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="TEZ"> + <component>TEZ_CLIENT</component> + </service> + + <service name="MAHOUT"> + <component>MAHOUT</component> + </service> + + <service name="PIG"> + <component>PIG</component> + </service> + + <service name="SQOOP"> + <component>SQOOP</component> + </service> + </group> + + <group name="SERVICE_CHECK_1" title="All Service Checks" xsi:type="service-check"> + <skippable>true</skippable> + <direction>UPGRADE</direction> + <priority> + <service>ZOOKEEPER</service> + <service>RANGER</service> + <service>HDFS</service> + <service>KAFKA</service> + <service>YARN</service> + <service>MAPREDUCE2</service> + <service>HBASE</service> + </priority> + </group> + + <group xsi:type="restart" name="HIVE_MASTERS" title="Hive Masters"> + <service-check>false</service-check> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <!-- Must be ran sequentially because Hive Metastore upgrades the schema and 
Hive Server copies tarballs. --> + <parallel-scheduler> + <max-degree-of-parallelism>1</max-degree-of-parallelism> + </parallel-scheduler> + <service name="HIVE"> + <component>MYSQL_SERVER</component> + <component>HIVE_METASTORE</component> + <component>HIVE_SERVER</component> + <component>WEBHCAT_SERVER</component> + </service> + </group> + + <group xsi:type="restart" name="HIVE_CLIENTS" title="Hive Clients"> + <service-check>false</service-check> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="HIVE"> + <component>HIVE_CLIENT</component> + <component>HCAT</component> + </service> + </group> + + <group xsi:type="restart" name="SPARK" title="Spark"> + <service-check>false</service-check> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="SPARK"> + <component>SPARK_JOBHISTORYSERVER</component> + <component>SPARK_THRIFTSERVER</component> + </service> + </group> + + <group xsi:type="restart" name="SPARK_CLIENTS" title="Spark Clients"> + <service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="SPARK"> + <component>SPARK_CLIENT</component> + </service> + </group> + + <group xsi:type="restart" name="OOZIE" title="Oozie"> + <service-check>false</service-check> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="OOZIE"> + <component>OOZIE_SERVER</component> + </service> + </group> + + <group xsi:type="restart" name="OOZIE_CLIENTS" 
title="Oozie Clients"> + <service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="OOZIE"> + <component>OOZIE_CLIENT</component> + </service> + </group> + + <group name="SERVICE_CHECK_2" title="All Service Checks" xsi:type="service-check"> + <skippable>true</skippable> + <direction>UPGRADE</direction> + <priority> + <service>HIVE</service> + <service>OOZIE</service> + <service>SPARK</service> + </priority> + </group> + + <group xsi:type="restart" name="FALCON" title="Falcon"> + <service-check>false</service-check> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="FALCON"> + <component>FALCON_SERVER</component> + </service> + </group> + + <group xsi:type="restart" name="FALCON_CLIENTS" title="Falcon Clients"> + <service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="FALCON"> + <component>FALCON_CLIENT</component> + </service> + </group> + + <group xsi:type="restart" name="KNOX" title="Knox"> + <service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="KNOX"> + <component>KNOX_GATEWAY</component> + </service> + </group> + + <group xsi:type="restart" name="STORM" title="Storm"> + <service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="STORM"> + <component>NIMBUS</component> + <component>SUPERVISOR</component> + <component>STORM_UI_SERVER</component> + 
<component>DRPC_SERVER</component> + </service> + + <execute-stage service="STORM" component="DRPC_SERVER" title="Rebuild Storm Topology"> + <task xsi:type="manual"> + <message>Please rebuild your topology using the new Storm version dependencies and resubmit it using the newly created jar.</message> + </task> + </execute-stage> + </group> + + <group xsi:type="restart" name="SLIDER" title="Slider"> + <service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="SLIDER"> + <component>SLIDER</component> + </service> + </group> + + <group xsi:type="restart" name="FLUME" title="Flume"> + <service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="FLUME"> + <component>FLUME_HANDLER</component> + </service> + </group> + + <group xsi:type="restart" name="ACCUMULO" title="Accumulo"> + <service-check>false</service-check> + <skippable>true</skippable> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <service name="ACCUMULO"> + <component>ACCUMULO_MASTER</component> + <component>ACCUMULO_TSERVER</component> + <component>ACCUMULO_MONITOR</component> + <component>ACCUMULO_GC</component> + <component>ACCUMULO_TRACER</component> + <component>ACCUMULO_CLIENT</component> + </service> + </group> + + <group name="SERVICE_CHECK_3" title="All Service Checks" xsi:type="service-check"> + <skippable>true</skippable> + <direction>UPGRADE</direction> + <priority> + <service>FALCON</service> + <service>KNOX</service> + <service>STORM</service> + <service>SLIDER</service> + <service>FLUME</service> + <service>ACCUMULO</service> + </priority> + </group> + + <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check"> + 
<direction>UPGRADE</direction> + + <execute-stage title="Check Component Versions"> + <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" /> + </execute-stage> + </group> + + <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}"> + <skippable>true</skippable> + <supports-auto-skip-failure>false</supports-auto-skip-failure> + + <execute-stage title="Confirm Finalize"> + <direction>UPGRADE</direction> + <task xsi:type="manual"> + <message>Please confirm you are ready to finalize.</message> + </task> + </execute-stage> + + <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS Finalize"> + <task xsi:type="execute" hosts="master"> + <script>scripts/namenode.py</script> + <function>finalize_non_rolling_upgrade</function> + </task> + </execute-stage> + + <execute-stage title="Save Cluster State" service="" component=""> + <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction"> + </task> + </execute-stage> + </group> + </order> + + <processing> + <service name="ZOOKEEPER"> + <component name="ZOOKEEPER_SERVER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="ZOOKEEPER_CLIENT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="RANGER"> + <component name="RANGER_ADMIN"> + <pre-upgrade> + + <task xsi:type="execute" hosts="all"> + <script>scripts/ranger_admin.py</script> + <function>set_pre_start</function> + </task> + + <task xsi:type="execute" hosts="any" summary="Upgrading Ranger database schema"> + <script>scripts/ranger_admin.py</script> + <function>setup_ranger_database</function> + </task> + + <task xsi:type="configure_function" hosts="all" /> + + <task xsi:type="execute" hosts="any" summary="Applying Ranger java patches"> + <script>scripts/ranger_admin.py</script> + 
<function>setup_ranger_java_patches</function> + </task> + </pre-upgrade> + + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + + </component> + + <component name="RANGER_USERSYNC"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="HDFS"> + <component name="NAMENODE"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="SECONDARY_NAMENODE"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="DATANODE"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="HDFS_CLIENT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="JOURNALNODE"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="ZKFC"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="NFS_GATEWAY"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="KAFKA"> + <component name="KAFKA_BROKER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="MAPREDUCE2"> + <component name="HISTORYSERVER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="MAPREDUCE2_CLIENT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="YARN"> + <component name="APP_TIMELINE_SERVER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="RESOURCEMANAGER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="NODEMANAGER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="YARN_CLIENT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service 
name="HBASE"> + <component name="HBASE_MASTER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="HBASE_REGIONSERVER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="HBASE_CLIENT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="TEZ"> + <component name="TEZ_CLIENT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="MAHOUT"> + <component name="MAHOUT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="PIG"> + <component name="PIG"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="SQOOP"> + <component name="SQOOP"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="HIVE"> + <component name="MYSQL_SERVER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="HIVE_METASTORE"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="HIVE_SERVER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="WEBHCAT_SERVER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="HIVE_CLIENT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="HCAT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="SPARK"> + <component name="SPARK_JOBHISTORYSERVER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + <component name="SPARK_THRIFTSERVER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + <component name="SPARK_CLIENT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + 
+ <service name="OOZIE"> + <component name="OOZIE_SERVER"> + <pre-upgrade> + <!-- It is extremely important that both of these tasks run on the exact same host. Hence, pick the first alphabetically. --> + <task xsi:type="configure_function" hosts="first" /> + + <task xsi:type="execute" hosts="first" sequential="true" summary="Upgrading the Oozie database and creating a new sharelib"> + <script>scripts/oozie_server_upgrade.py</script> + <function>upgrade_oozie_database_and_sharelib</function> + </task> + </pre-upgrade> + + <pre-downgrade> + <task xsi:type="execute" hosts="any" sequential="true" summary="Create a new sharelib"> + <script>scripts/oozie_server_upgrade.py</script> + <function>create_sharelib</function> + </task> + </pre-downgrade> + + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="OOZIE_CLIENT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="FALCON"> + <component name="FALCON_SERVER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + <component name="FALCON_CLIENT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="KNOX"> + <component name="KNOX_GATEWAY"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="STORM"> + <component name="NIMBUS"> + <pre-upgrade> + <task xsi:type="execute" hosts="any" summary="Removing Storm data from ZooKeeper"> + <script>scripts/storm_upgrade.py</script> + <function>delete_storm_zookeeper_data</function> + </task> + + <task xsi:type="execute" summary="Removing local Storm data"> + <script>scripts/storm_upgrade.py</script> + <function>delete_storm_local_data</function> + </task> + </pre-upgrade> + + <pre-downgrade> + <task xsi:type="execute" hosts="any" summary="Removing Storm data from ZooKeeper"> + <script>scripts/storm_upgrade.py</script> + 
<function>delete_storm_zookeeper_data</function> + </task> + + <task xsi:type="execute" summary="Removing local Storm data"> + <script>scripts/storm_upgrade.py</script> + <function>delete_storm_local_data</function> + </task> + </pre-downgrade> + + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="SUPERVISOR"> + <pre-upgrade> + <task xsi:type="execute" summary="Removing local Storm data"> + <script>scripts/storm_upgrade.py</script> + <function>delete_storm_local_data</function> + </task> + </pre-upgrade> + + <pre-downgrade> + <task xsi:type="manual"> + <message>Before continuing, please deactivate and kill any currently running topologies.</message> + </task> + + <task xsi:type="execute" summary="Removing local Storm data"> + <script>scripts/storm_upgrade.py</script> + <function>delete_storm_local_data</function> + </task> + </pre-downgrade> + + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="STORM_UI_SERVER"> + <pre-upgrade> + <task xsi:type="execute" summary="Removing local Storm data"> + <script>scripts/storm_upgrade.py</script> + <function>delete_storm_local_data</function> + </task> + </pre-upgrade> + + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="DRPC_SERVER"> + <pre-upgrade> + <task xsi:type="execute" summary="Removing local Storm data"> + <script>scripts/storm_upgrade.py</script> + <function>delete_storm_local_data</function> + </task> + </pre-upgrade> + + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + + <post-upgrade> + <task xsi:type="manual"> + <message>Please rebuild your topology using the new Storm version dependencies and resubmit it using the newly created jar.</message> + </task> + </post-upgrade> + </component> + </service> + + <service name="ACCUMULO"> + <component name="ACCUMULO_MASTER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="ACCUMULO_TSERVER"> 
+ <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="ACCUMULO_MONITOR"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="ACCUMULO_GC"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="ACCUMULO_TRACER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + + <component name="ACCUMULO_CLIENT"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="SLIDER"> + <component name="SLIDER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + + <service name="FLUME"> + <component name="FLUME_HANDLER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + </service> + </processing> +</upgrade>
http://git-wip-us.apache.org/repos/asf/ambari/blob/1427726a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml index 4e49097..4d64b93 100644 --- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml +++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml @@ -37,7 +37,7 @@ <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade"> <direction>UPGRADE</direction> - <skippable>true</skippable> + <skippable>true</skippable> <supports-auto-skip-failure>false</supports-auto-skip-failure> <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop YARN Queues"> @@ -58,18 +58,26 @@ </task> </execute-stage> + <!-- Large Cluster Hack: May need to upload new tez tarball on your own to HDFS since all services are stopped. --> + <!-- <execute-stage service="TEZ" component="TEZ_CLIENT" title="Check Tez Tarball"> <task xsi:type="execute" hosts="any"> <script>scripts/pre_upgrade.py</script> <function>prepare</function> </task> </execute-stage> + --> </group> <group xsi:type="stop" name="STOP_HIGH_LEVEL_SERVICE_COMPONENTS" title="Stop Components for High-Level Services"> + <!-- Large Cluster Hack: assume all components are already stopped. 
--> + <direction>DOWNGRADE</direction> + <skippable>true</skippable> <supports-auto-skip-failure>false</supports-auto-skip-failure> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service-check>false</service-check> <service name="ATLAS"> @@ -99,6 +107,10 @@ <component>KNOX_GATEWAY</component> </service> + <service name="KAFKA"> + <component>KAFKA_BROKER</component> + </service> + <service name="FALCON"> <component>FALCON_SERVER</component> </service> @@ -146,19 +158,39 @@ </task> </execute-stage> + <!-- Large Cluster Hack: backup HBASE manually --> + <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBase"> + <task xsi:type="manual"> + <message>Before continuing, please ensure you have taken a snapshot of HBase while it was running. echo 'snapshot_all' | /usr/hdp/current/hbase-client/bin/hbase shell</message> + </task> + </execute-stage> + <!-- <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE"> <task xsi:type="execute" hosts="master"> <script>scripts/hbase_upgrade.py</script> <function>take_snapshot</function> </task> </execute-stage> + --> + <!-- Large Cluster Hack: backup HDFS manually --> + <execute-stage service="HDFS" component="NAMENODE" title="Backup HDFS"> + <task xsi:type="manual"> + <message>Before continuing, please ensure you have backed up HDFS while it was running.</message> + <message>1. Enter safe mode.</message> + <message>2. As the HDFS user, /usr/hdp/current/hadoop-client/bin/hdfs dfsadmin -saveNamespace</message> + <message>3. Backup each NameNode Dir. 
E.g., cp -ar ${dfs.namenode.name.dir}/current $backup_location</message> + </task> + </execute-stage> + + <!-- <execute-stage service="HDFS" component="NAMENODE" title="Prepare HDFS"> <task xsi:type="execute" hosts="master"> <script>scripts/namenode.py</script> <function>prepare_express_upgrade</function> </task> </execute-stage> + --> <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger Database"> <task xsi:type="manual"> @@ -177,10 +209,15 @@ </group> <group xsi:type="stop" name="STOP_LOW_LEVEL_SERVICE_COMPONENTS" title="Stop Components for Core Services"> + <!-- Large Cluster Hack: assume all components are already stopped. --> + <direction>DOWNGRADE</direction> + <skippable>true</skippable> <supports-auto-skip-failure>false</supports-auto-skip-failure> <service-check>false</service-check> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="HBASE"> <component>HBASE_REGIONSERVER</component> @@ -260,22 +297,35 @@ <direction>UPGRADE</direction> <!-- prevent config changes on downgrade --> <skippable>true</skippable> <!-- May fix configuration problems manually --> - <!--YARN--> + <!-- HDFS HDP 2.5 --> + <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for Hdfs Namenode HA"> + <task xsi:type="configure" id="hdp_2_5_0_0_namenode_ha_adjustments"/> + </execute-stage> + + <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for Hdfs Namenode"> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db"/> + </execute-stage> + + <!-- YARN --> <execute-stage service="MAPREDUCE2" component="MAPREDUCE2_CLIENT" title="Apply config changes for Mapreduce2 client"> <task xsi:type="server_action" summary="Verifying LZO codec path for mapreduce" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath"/> </execute-stage> - <!-- YARN --> + <!-- YARN 2.5 --> <execute-stage 
service="YARN" component="RESOURCEMANAGER" title="Apply config changes for Yarn Resourcemanager"> <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_yarn_audit_db"/> </execute-stage> - <!-- YARN --> <execute-stage service="YARN" component="NODEMANAGER" title="Add Spark2 shuffle"> <task xsi:type="configure" id="hdp_2_5_0_0_add_spark2_yarn_shuffle"/> </execute-stage> - <!--TEZ--> + <!-- HBASE HDP 2.5 --> + <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for Hbase Master"> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db"/> + </execute-stage> + + <!--TEZ HDP 2.5 --> <execute-stage service="TEZ" component="TEZ_CLIENT" title="Verify LZO codec path for Tez"> <task xsi:type="server_action" summary="Verifying LZO codec path for Tez" class="org.apache.ambari.server.serveraction.upgrades.FixLzoCodecPath"/> </execute-stage> @@ -284,14 +334,32 @@ <task xsi:type="configure" id="hdp_2_5_0_0_tez_client_adjust_tez_lib_uris_property"/> </execute-stage> + <!-- HIVE HDP 2.5 --> + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hive_audit_db"/> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <!-- Remove Atlas configs that were incorrectly added to hive-site instead of Atlas' application.properties. 
--> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_hive_atlas_configs"/> + </execute-stage> + + <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> + <task xsi:type="server_action" summary="Update hive-env content" class="org.apache.ambari.server.serveraction.upgrades.HiveEnvClasspathAction"/> + </execute-stage> + <!--OOZIE--> <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server"> <task xsi:type="server_action" summary="Adjusting Oozie properties" class="org.apache.ambari.server.serveraction.upgrades.OozieConfigCalculation"/> <task xsi:type="server_action" summary="Fix oozie admin users" class="org.apache.ambari.server.serveraction.upgrades.FixOozieAdminUsers"/> </execute-stage> + <!-- KNOX HDP 2.5 --> + <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Apply config changes for Knox Gateway"> + <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db"/> + </execute-stage> - <!--FALCON--> + <!--FALCON--> <execute-stage service="FALCON" component="FALCON_SERVER" title="Apply config changes for Falcon"> <task xsi:type="configure" id="hdp_2_5_0_0_falcon_server_adjust_services_property"/> </execute-stage> @@ -325,15 +393,6 @@ <task xsi:type="server_action" summary="Configuring Ranger Alerts" class="org.apache.ambari.server.serveraction.upgrades.RangerWebAlertConfigAction"/> </execute-stage> - <!-- HDFS --> - <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for Hdfs Namenode HA"> - <task xsi:type="configure" id="hdp_2_5_0_0_namenode_ha_adjustments"/> - </execute-stage> - - <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for Hdfs Namenode"> - <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hdfs_audit_db"/> - </execute-stage> - <!-- SQOOP --> <execute-stage service="SQOOP" component="SQOOP" title="Apply config changes for Sqoop to remove Atlas Configs"> <!-- Remove Atlas configs 
that were incorrectly added to sqoop-site instead of Atlas' application.properties. --> @@ -346,31 +405,7 @@ <task xsi:type="configure" id="hdp_2_5_0_0_add_sqoop_atlas_security_configs" /> </execute-stage> - <!-- HIVE --> - <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> - <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hive_audit_db"/> - </execute-stage> - - <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> - <!-- Remove Atlas configs that were incorrectly added to hive-site instead of Atlas' application.properties. --> - <task xsi:type="configure" id="hdp_2_5_0_0_remove_hive_atlas_configs"/> - </execute-stage> - - <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server"> - <task xsi:type="server_action" summary="Update hive-env content" class="org.apache.ambari.server.serveraction.upgrades.HiveEnvClasspathAction"/> - </execute-stage> - - <!-- HBASE --> - <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for Hbase Master"> - <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_hbase_audit_db"/> - </execute-stage> - - <!-- KNOX --> - <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Apply config changes for Knox Gateway"> - <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_knox_audit_db"/> - </execute-stage> - - <!-- STORM --> + <!-- STORM HDP 2.5 --> <execute-stage service="STORM" component="NIMBUS" title="Apply config changes for Storm Nimbus"> <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_storm_audit_db"/> </execute-stage> @@ -438,6 +473,11 @@ <supports-auto-skip-failure>false</supports-auto-skip-failure> <execute-stage title="Update stack to {{version}}"> + <!-- Large Cluster Hack: batch this command. 
--> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> + <task xsi:type="execute"> <script>scripts/ru_set_all.py</script> <function>actionexecute</function> @@ -445,13 +485,14 @@ </execute-stage> </group> - <!-- Now, restart all of the services. --> <group xsi:type="restart" name="ZOOKEEPER" title="ZooKeeper"> <service-check>false</service-check> <skippable>true</skippable> <supports-auto-skip-failure>false</supports-auto-skip-failure> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="ZOOKEEPER"> <service-check>true</service-check> <component>ZOOKEEPER_SERVER</component> @@ -463,7 +504,9 @@ <service-check>false</service-check> <skippable>true</skippable> <supports-auto-skip-failure>false</supports-auto-skip-failure> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="RANGER"> <component>RANGER_ADMIN</component> <component>RANGER_USERSYNC</component> @@ -474,7 +517,9 @@ <service-check>false</service-check> <skippable>true</skippable> <supports-auto-skip-failure>false</supports-auto-skip-failure> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="RANGER_KMS"> <component>RANGER_KMS_SERVER</component> </service> @@ -484,7 +529,9 @@ <service-check>false</service-check> <skippable>true</skippable> <supports-auto-skip-failure>false</supports-auto-skip-failure> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="HDFS"> <component>JOURNALNODE</component> <component>ZKFC</component> @@ -498,7 +545,9 @@ <group xsi:type="restart" name="HDFS_DATANODES" title="HDFS DataNodes"> <service-check>false</service-check> <skippable>true</skippable> - 
<parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="HDFS"> <component>DATANODE</component> </service> @@ -520,7 +569,9 @@ <group xsi:type="restart" name="KAFKA" title="Kafka"> <service-check>false</service-check> <skippable>true</skippable> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="KAFKA"> <component>KAFKA_BROKER</component> </service> @@ -530,7 +581,9 @@ <service-check>false</service-check> <skippable>true</skippable> <supports-auto-skip-failure>false</supports-auto-skip-failure> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="MAPREDUCE2"> <component>HISTORYSERVER</component> @@ -547,7 +600,9 @@ <group xsi:type="restart" name="YARN_NODEMANAGERS" title="YARN NodeManagers"> <service-check>false</service-check> <skippable>true</skippable> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="YARN"> <component>NODEMANAGER</component> @@ -570,7 +625,9 @@ <group xsi:type="restart" name="CLIENTS" title="Tez, Pig, Sqoop Clients"> <service-check>false</service-check> <skippable>true</skippable> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="TEZ"> <component>TEZ_CLIENT</component> </service> @@ -612,6 +669,7 @@ <max-degree-of-parallelism>1</max-degree-of-parallelism> </parallel-scheduler> <service name="HIVE"> + <component>MYSQL_SERVER</component> <component>HIVE_METASTORE</component> <component>HIVE_SERVER</component> <component>WEBHCAT_SERVER</component> @@ -622,7 +680,9 @@ <service-check>false</service-check> <skippable>true</skippable> 
<supports-auto-skip-failure>false</supports-auto-skip-failure> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="HIVE"> <component>HIVE_CLIENT</component> <component>HCAT</component> @@ -633,7 +693,9 @@ <service-check>false</service-check> <skippable>true</skippable> <supports-auto-skip-failure>false</supports-auto-skip-failure> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="SPARK"> <component>SPARK_JOBHISTORYSERVER</component> <component>SPARK_THRIFTSERVER</component> @@ -643,7 +705,9 @@ <group xsi:type="restart" name="SPARK_CLIENTS" title="Spark Clients"> <service-check>false</service-check> <skippable>true</skippable> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="SPARK"> <component>SPARK_CLIENT</component> </service> @@ -653,7 +717,9 @@ <service-check>false</service-check> <skippable>true</skippable> <supports-auto-skip-failure>false</supports-auto-skip-failure> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="OOZIE"> <component>OOZIE_SERVER</component> </service> @@ -662,7 +728,9 @@ <group xsi:type="restart" name="OOZIE_CLIENTS" title="Oozie Clients"> <service-check>false</service-check> <skippable>true</skippable> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="OOZIE"> <component>OOZIE_CLIENT</component> </service> @@ -682,7 +750,9 @@ <service-check>false</service-check> <skippable>true</skippable> <supports-auto-skip-failure>false</supports-auto-skip-failure> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + 
</parallel-scheduler> <service name="FALCON"> <component>FALCON_SERVER</component> </service> @@ -691,7 +761,9 @@ <group xsi:type="restart" name="FALCON_CLIENTS" title="Falcon Clients"> <service-check>false</service-check> <skippable>true</skippable> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="FALCON"> <component>FALCON_CLIENT</component> </service> @@ -700,7 +772,9 @@ <group xsi:type="restart" name="KNOX" title="Knox"> <service-check>false</service-check> <skippable>true</skippable> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="KNOX"> <component>KNOX_GATEWAY</component> </service> @@ -709,7 +783,9 @@ <group xsi:type="restart" name="STORM" title="Storm"> <service-check>false</service-check> <skippable>true</skippable> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="STORM"> <component>NIMBUS</component> <component>SUPERVISOR</component> @@ -727,7 +803,9 @@ <group xsi:type="restart" name="SLIDER" title="Slider"> <service-check>false</service-check> <skippable>true</skippable> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="SLIDER"> <component>SLIDER</component> </service> @@ -736,7 +814,9 @@ <group xsi:type="restart" name="FLUME" title="Flume"> <service-check>false</service-check> <skippable>true</skippable> - <parallel-scheduler/> + <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="FLUME"> <component>FLUME_HANDLER</component> </service> @@ -745,7 +825,9 @@ <group xsi:type="restart" name="ACCUMULO" title="Accumulo"> <service-check>false</service-check> <skippable>true</skippable> - <parallel-scheduler/> 
+ <parallel-scheduler> + <max-degree-of-parallelism>360</max-degree-of-parallelism> + </parallel-scheduler> <service name="ACCUMULO"> <component>ACCUMULO_MASTER</component> <component>ACCUMULO_TSERVER</component> @@ -1019,6 +1101,12 @@ </service> <service name="HIVE"> + <component name="MYSQL_SERVER"> + <upgrade> + <task xsi:type="restart-task"/> + </upgrade> + </component> + <component name="HIVE_METASTORE"> <upgrade> <task xsi:type="restart-task"/>
