AMBARI-13704. Finalize is invoked even when it is sure to fail because there are failures that have been skipped during RU (ncole)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7546f202
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7546f202
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7546f202

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 7546f20237ec7c18116edb9f4efda4f5eb9932ca
Parents: b8a9064
Author: Nate Cole <[email protected]>
Authored: Tue Nov 3 17:08:46 2015 -0500
Committer: Nate Cole <[email protected]>
Committed: Wed Nov 4 08:08:27 2015 -0500

----------------------------------------------------------------------
 .../upgrades/ComponentVersionCheckAction.java   | 123 ++++
 .../upgrades/FinalizeUpgradeAction.java         |  71 ++--
 .../HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml |   9 +
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml |   9 +
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml |   9 +
 .../stacks/HDP/2.2/upgrades/upgrade-2.2.xml     |   9 +
 .../stacks/HDP/2.2/upgrades/upgrade-2.3.xml     |   9 +
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml |   9 +
 .../stacks/HDP/2.3/upgrades/upgrade-2.3.xml     |   9 +
 .../ComponentVersionCheckActionTest.java        | 426 +++++++++++++++++++
 10 files changed, 652 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7546f202/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
new file mode 100644
index 0000000..39175c9
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.serveraction.upgrades;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.StackId;
+import org.apache.commons.lang.StringUtils;
+
+import com.google.gson.JsonArray;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonPrimitive;
+
+/**
+ * Action that checks component versions to ensure {@link FinalizeUpgradeAction} will
+ * complete successfully.
+ */
+public class ComponentVersionCheckAction extends FinalizeUpgradeAction {
+
+  @Override
+  public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
+      throws AmbariException, InterruptedException {
+
+    Map<String, String> commandParams = getExecutionCommand().getCommandParams();
+
+    String version = commandParams.get(VERSION_KEY);
+    StackId targetStackId = new StackId(commandParams.get(TARGET_STACK_KEY));
+    String clusterName = getExecutionCommand().getClusterName();
+
+    Cluster cluster = clusters.getCluster(clusterName);
+
+    List<InfoTuple> errors = checkHostComponentVersions(cluster, version, targetStackId);
+
+    StringBuilder outSB = new StringBuilder();
+    StringBuilder errSB = new StringBuilder();
+
+    if (errors.isEmpty()) {
+      outSB.append("No version mismatches found for components");
+      errSB.append("No errors found for components");
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outSB.toString(), errSB.toString());
+    } else {
+      String structuredOut = getErrors(outSB, errSB, errors);
+      return createCommandReport(0, HostRoleStatus.HOLDING, structuredOut, outSB.toString(), errSB.toString());
+    }
+  }
+
+  private String getErrors(StringBuilder outSB, StringBuilder errSB, List<InfoTuple> errors) {
+
+    errSB.append("The following components were found to have version mismatches. ");
+    errSB.append("Finalize will not complete successfully:\n");
+
+    Set<String> hosts = new TreeSet<>();
+    Map<String, JsonArray> hostDetails = new HashMap<>();
+
+    for (InfoTuple tuple : errors) {
+      errSB.append(tuple.hostName).append(": ");
+      errSB.append(tuple.serviceName).append('/').append(tuple.componentName);
+      errSB.append(" reports ").append(StringUtils.trimToEmpty(tuple.currentVersion));
+      errSB.append('\n');
+
+      hosts.add(tuple.hostName);
+
+      if (!hostDetails.containsKey(tuple.hostName)) {
+        hostDetails.put(tuple.hostName, new JsonArray());
+      }
+
+      JsonObject obj = new JsonObject();
+      obj.addProperty("service", tuple.serviceName);
+      obj.addProperty("component", tuple.componentName);
+      obj.addProperty("version", tuple.currentVersion);
+
+      hostDetails.get(tuple.hostName).add(obj);
+    }
+
+    JsonArray hostJson = new JsonArray();
+    for (String h : hosts) {
+      hostJson.add(new JsonPrimitive(h));
+    }
+
+    JsonObject valueJson = new JsonObject();
+    for (Entry<String, JsonArray> entry : hostDetails.entrySet()) {
+      valueJson.add(entry.getKey(), entry.getValue());
+    }
+
+    outSB.append(String.format("There were errors on the following hosts: %s",
+        StringUtils.join(hosts, ", ")));
+
+    JsonObject obj = new JsonObject();
+    obj.add("hosts", hostJson);
+    obj.add("host_detail", valueJson);
+
+    return obj.toString();
+  }
+
+
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/7546f202/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
index 22fc4c0..ba4dadc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
@@ -83,7 +83,7 @@ public class FinalizeUpgradeAction extends AbstractServerAction {
    * The Cluster that this ServerAction implementation is executing on
    */
   @Inject
-  private Clusters clusters;
+  protected Clusters clusters;
 
   @Inject
   private ClusterVersionDAO clusterVersionDAO;
@@ -217,7 +217,22 @@ public class FinalizeUpgradeAction extends AbstractServerAction {
 
       // iterate through all host components and make sure that they are on the
       // correct version; if they are not, then this will throw an exception
-      checkHostComponentVersions(cluster, version, clusterDesiredStackId);
+      List<InfoTuple> errors = checkHostComponentVersions(cluster, version, clusterDesiredStackId);
+      if (! errors.isEmpty()) {
+        StrBuilder messageBuff = new StrBuilder(
+            String.format(
+                "The following %d host component(s) "
+                    + "have not been upgraded to version %s. Please install and upgrade "
+                    + "the Stack Version on those hosts and try again.\nHost components:\n",
+                errors.size(), version));
+
+        for (InfoTuple error : errors) {
+          messageBuff.append(String.format("%s on host %s\n", error.componentName, error.hostName));
+        }
+
+        throw new AmbariException(messageBuff.toString());
+      }
+
 
       // we're guaranteed to be ready transition to UPGRADED now; ensure that
       // the transition will be allowed if the cluster state is not UPGRADED
@@ -385,26 +400,16 @@ public class FinalizeUpgradeAction extends AbstractServerAction {
   /**
    * Confirms that all host components that are able to provide hdp version,
    * have been upgraded to the target version.
-   * @param cluster the cluster the upgrade is for
-   * @param desiredVersion the target version of the upgrade
-   * @throws AmbariException if any host component has not been updated yet
+   * @param cluster         the cluster the upgrade is for
+   * @param desiredVersion  the target version of the upgrade
+   * @param targetStack     the target stack id for meta-info lookup
+   * @return the list of {@link InfoTuple} objects of host components in error
    */
-  private void checkHostComponentVersions(Cluster cluster, String desiredVersion, StackId targetStackId)
+  protected List<InfoTuple> checkHostComponentVersions(Cluster cluster, String desiredVersion, StackId targetStackId)
       throws AmbariException {
 
-    class InfoTuple {
-      public final String serviceName;
-      public final String componentName;
-      public final String hostName;
-
-      public InfoTuple(String serviceName, String componentName, String hostName) {
-        this.serviceName = serviceName;
-        this.componentName = componentName;
-        this.hostName = hostName;
-      }
-    }
-
     ArrayList<InfoTuple> errors = new ArrayList<InfoTuple>();
+
     for (Service service : cluster.getServices().values()) {
       for (ServiceComponent serviceComponent : service.getServiceComponents().values()) {
         for (ServiceComponentHost serviceComponentHost : serviceComponent.getServiceComponentHosts().values()) {
@@ -420,25 +425,29 @@ public class FinalizeUpgradeAction extends AbstractServerAction {
           } else if (componentInfo.isVersionAdvertised()
               && !serviceComponentHost.getVersion().equals(desiredVersion)) {
             errors.add(new InfoTuple(
-                service.getName(), serviceComponent.getName(), serviceComponentHost.getHostName()));
+                service.getName(), serviceComponent.getName(),
+                serviceComponentHost.getHostName(), serviceComponentHost.getVersion()));
           }
         }
       }
     }
 
-    if (! errors.isEmpty()) {
-      StrBuilder messageBuff = new StrBuilder(
-          String.format(
-              "The following %d host component(s) "
-                  + "have not been upgraded to version %s. Please install and upgrade "
-                  + "the Stack Version on those hosts and try again.\nHost components:\n",
-              errors.size(), desiredVersion));
-
-      for (InfoTuple error : errors) {
-        messageBuff.append(String.format("%s on host %s\n", error.componentName, error.hostName));
-      }
+    return errors;
+  }
 
-      throw new AmbariException(messageBuff.toString());
+  protected static class InfoTuple {
+    protected final String serviceName;
+    protected final String componentName;
+    protected final String hostName;
+    protected final String currentVersion;
+
+    protected InfoTuple(String service, String component, String host, String version) {
+      serviceName = service;
+      componentName = component;
+      hostName = host;
+      currentVersion = version;
     }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/7546f202/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
index 3730d67..91d4f64 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
@@ -462,6 +462,15 @@
     </execute-stage>
   </group>
 
+  <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
+    <skippable>false</skippable>
+    <direction>UPGRADE</direction>
+
+    <execute-stage title="Check Component Versions">
+      <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
+    </execute-stage>
+  </group>
+
   <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
     <skippable>true</skippable>
     <execute-stage title="Check Unhealthy Hosts" id="unhealthy-hosts">

http://git-wip-us.apache.org/repos/asf/ambari/blob/7546f202/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
index 677bc8d..69be963 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
@@ -457,6 +457,15 @@
     </execute-stage>
   </group>
 
+  <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
+    <skippable>false</skippable>
+    <direction>UPGRADE</direction>
+
+    <execute-stage title="Check Component Versions">
+      <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
+    </execute-stage>
+  </group>
+
   <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
     <skippable>true</skippable>
     <execute-stage title="Check Unhealthy Hosts" id="unhealthy-hosts">

http://git-wip-us.apache.org/repos/asf/ambari/blob/7546f202/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
index d41fc36..06e5859 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
@@ -725,6 +725,15 @@
     </execute-stage>
   </group>
 
+  <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
+    <skippable>false</skippable>
+    <direction>UPGRADE</direction>
+
+    <execute-stage title="Check Component Versions">
+      <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
+    </execute-stage>
+  </group>
+
   <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
     <skippable>true</skippable>
     <execute-stage title="Check Unhealthy Hosts" id="unhealthy-hosts">

http://git-wip-us.apache.org/repos/asf/ambari/blob/7546f202/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
index 753d1cc..5248d7a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
@@ -296,6 +296,15 @@
     </execute-stage>
   </group>
 
+  <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
+    <skippable>false</skippable>
+    <direction>UPGRADE</direction>
+
+    <execute-stage title="Check Component Versions">
+      <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
+    </execute-stage>
+  </group>
+
   <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
     <skippable>true</skippable>
     <execute-stage title="Check Unhealthy Hosts" id="unhealthy-hosts">

http://git-wip-us.apache.org/repos/asf/ambari/blob/7546f202/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
index f910c90..645ec0d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
@@ -338,6 +338,15 @@
     </execute-stage>
   </group>
 
+  <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
+    <skippable>false</skippable>
+    <direction>UPGRADE</direction>
+
+    <execute-stage title="Check Component Versions">
+      <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
+    </execute-stage>
+  </group>
+
   <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
     <skippable>true</skippable>
     <execute-stage title="Check Unhealthy Hosts" id="unhealthy-hosts">

http://git-wip-us.apache.org/repos/asf/ambari/blob/7546f202/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
index 4ef129d..016bad6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml
@@ -523,6 +523,15 @@
     </execute-stage>
   </group>
 
+  <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
+    <skippable>false</skippable>
+    <direction>UPGRADE</direction>
+
+    <execute-stage title="Check Component Versions">
+      <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
+    </execute-stage>
+  </group>
+
   <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
     <skippable>true</skippable>
     <execute-stage title="Check Unhealthy Hosts" id="unhealthy-hosts">

http://git-wip-us.apache.org/repos/asf/ambari/blob/7546f202/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
index 5ebaf2c..60723b7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
@@ -341,6 +341,15 @@
     </execute-stage>
   </group>
 
+  <group xsi:type="cluster" name="FINALIZE_PRE_CHECK" title="Finalize {{direction.text.proper}} Pre-Check">
+    <skippable>false</skippable>
+    <direction>UPGRADE</direction>
+
+    <execute-stage title="Check Component Versions">
+      <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.ComponentVersionCheckAction" />
+    </execute-stage>
+  </group>+
+
   <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize {{direction.text.proper}}">
     <skippable>true</skippable>
     <execute-stage title="Check Unhealthy Hosts" id="unhealthy-hosts">

http://git-wip-us.apache.org/repos/asf/ambari/blob/7546f202/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
new file mode 100644
index 0000000..ea0cf45
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -0,0 +1,426 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.serveraction.upgrades;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.ServiceComponentNotFoundException;
+import org.apache.ambari.server.ServiceNotFoundException;
+import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
+import org.apache.ambari.server.actionmanager.HostRoleCommand;
+import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
+import org.apache.ambari.server.orm.dao.HostDAO;
+import org.apache.ambari.server.orm.dao.HostVersionDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.HostVersionEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigImpl;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.RepositoryInfo;
+import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentFactory;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceComponentHostFactory;
+import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.State;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.inject.Guice;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
+import com.google.inject.persist.UnitOfWork;
+
+/**
+ * Tests upgrade-related server side actions
+ */
+public class ComponentVersionCheckActionTest {
+  private static final String HDP_2_1_1_0 = "2.1.1.0-1";
+  private static final String HDP_2_1_1_1 = "2.1.1.1-2";
+
+  private static final String HDP_2_2_1_0 = "2.2.0.1-3";
+
+  private static final StackId HDP_21_STACK = new StackId("HDP-2.1.1");
+  private static final StackId HDP_22_STACK = new StackId("HDP-2.2.0");
+
+  private static final String HDP_211_CENTOS6_REPO_URL = "http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.1.1.0-118";
+
+  private Injector m_injector;
+
+  @Inject
+  private OrmTestHelper m_helper;
+
+  @Inject
+  private RepositoryVersionDAO repoVersionDAO;
+
+  @Inject
+  private ClusterVersionDAO clusterVersionDAO;
+
+  @Inject
+  private HostVersionDAO hostVersionDAO;
+
+  @Inject
+  private HostRoleCommandFactory hostRoleCommandFactory;
+
+  @Inject
+  private ServiceFactory serviceFactory;
+
+  @Inject
+  private ServiceComponentFactory serviceComponentFactory;
+
+  @Inject
+  private ServiceComponentHostFactory serviceComponentHostFactory;
+
+  @Before
+  public void setup() throws Exception {
+    m_injector = Guice.createInjector(new InMemoryDefaultTestModule());
+    m_injector.getInstance(GuiceJpaInitializer.class);
+    m_injector.injectMembers(this);
+    m_injector.getInstance(UnitOfWork.class).begin();
+  }
+
+  @After
+  public void teardown() throws Exception {
+    m_injector.getInstance(UnitOfWork.class).end();
+    m_injector.getInstance(PersistService.class).stop();
+  }
+
+  private void makeUpgradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack, String targetRepo) throws Exception {
+    String clusterName = "c1";
+    String hostName = "h1";
+
+    Clusters clusters = m_injector.getInstance(Clusters.class);
+    clusters.addCluster(clusterName, sourceStack);
+
+    StackDAO stackDAO = m_injector.getInstance(StackDAO.class);
+    StackEntity stackEntitySource = stackDAO.find(sourceStack.getStackName(), sourceStack.getStackVersion());
+    StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
+    assertNotNull(stackEntitySource);
+    assertNotNull(stackEntityTarget);
+
+    Cluster c = clusters.getCluster(clusterName);
+    c.setDesiredStackVersion(sourceStack);
+
+    // add a host component
+    clusters.addHost(hostName);
+
+    Host host = clusters.getHost(hostName);
+
+    Map<String, String> hostAttributes = new HashMap<String, String>();
+    hostAttributes.put("os_family", "redhat");
+    hostAttributes.put("os_release_version", "6");
+    host.setHostAttributes(hostAttributes);
+    host.persist();
+
+    // Create the starting repo version
+    m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
+    c.createClusterVersion(sourceStack, sourceRepo, "admin", RepositoryVersionState.UPGRADING);
+    c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.CURRENT);
+
+    // Create the new repo version
+    String urlInfo = "[{'repositories':["
+        + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'" + targetStack.getStackId() + "'}"
+        + "], 'OperatingSystems/os_type':'redhat6'}]";
+    repoVersionDAO.create(stackEntityTarget, targetRepo, String.valueOf(System.currentTimeMillis()), urlInfo);
+
+    // Start upgrading the newer repo
+    c.createClusterVersion(targetStack, targetRepo, "admin", RepositoryVersionState.INSTALLING);
+    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
+    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.UPGRADING);
+    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.UPGRADED);
+    c.setCurrentStackVersion(targetStack);
+
+    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
+        RepositoryVersionState.CURRENT);
+
+    HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
+
+    HostVersionEntity entity = new HostVersionEntity();
+    entity.setHostEntity(hostDAO.findByName(hostName));
+    entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
+    entity.setState(RepositoryVersionState.UPGRADED);
+    hostVersionDAO.create(entity);
+  }
+
+  private void makeCrossStackUpgradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack, String targetRepo) throws Exception {
+    String clusterName = "c1";
+    String hostName = "h1";
+
+    Clusters clusters = m_injector.getInstance(Clusters.class);
+    clusters.addCluster(clusterName, sourceStack);
+
+    StackDAO stackDAO = m_injector.getInstance(StackDAO.class);
+    StackEntity stackEntitySource = stackDAO.find(sourceStack.getStackName(), sourceStack.getStackVersion());
+    StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
+
+    assertNotNull(stackEntitySource);
+    assertNotNull(stackEntityTarget);
+
+    Cluster c = clusters.getCluster(clusterName);
+    c.setCurrentStackVersion(sourceStack);
+    c.setDesiredStackVersion(sourceStack);
+
+    // add a host component
+    clusters.addHost(hostName);
+    Host host = clusters.getHost(hostName);
+
+    Map<String, String> hostAttributes = new HashMap<String, String>();
+    hostAttributes.put("os_family", "redhat");
+    hostAttributes.put("os_release_version", "6");
+    host.setHostAttributes(hostAttributes);
+    host.persist();
+
+    clusters.mapHostToCluster(hostName, clusterName);
+
+    // Create the starting repo version
+    m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
+    c.createClusterVersion(sourceStack, sourceRepo, "admin", RepositoryVersionState.UPGRADING);
+    c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.CURRENT);
+
+    // Create the new repo version
+    String urlInfo = "[{'repositories':["
+        + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'" + targetRepo + "'}"
+        + "], 'OperatingSystems/os_type':'redhat6'}]";
+    repoVersionDAO.create(stackEntityTarget, targetRepo, String.valueOf(System.currentTimeMillis()), urlInfo);
+
+    // Start upgrading the newer repo
+    c.createClusterVersion(targetStack, targetRepo, "admin", RepositoryVersionState.INSTALLING);
+    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
+    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.UPGRADING);
+    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.UPGRADED);
+
+    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
+        RepositoryVersionState.CURRENT);
+
+    HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
+
+    HostVersionEntity entity = new HostVersionEntity();
+    entity.setHostEntity(hostDAO.findByName(hostName));
+    entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
+    entity.setState(RepositoryVersionState.UPGRADED);
+    hostVersionDAO.create(entity);
+  }
+
+  @Test
+  public void testMatchingVersions() throws Exception {
+    StackId sourceStack = HDP_21_STACK;
+    StackId targetStack = HDP_21_STACK;
+    String sourceRepo = HDP_2_1_1_0;
+    String targetRepo = HDP_2_1_1_1;
+
+    makeUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
+
+    // Verify the repo before calling Finalize
+    AmbariMetaInfo metaInfo = m_injector.getInstance(AmbariMetaInfo.class);
+
+    RepositoryInfo repo = metaInfo.getRepository(sourceStack.getStackName(), sourceStack.getStackVersion(), "redhat6", sourceStack.getStackId());
+    assertEquals(HDP_211_CENTOS6_REPO_URL, repo.getBaseUrl());
+
+    // Finalize the upgrade
+    Map<String, String> commandParams = new HashMap<String, String>();
+    commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
+    commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
+
+    ExecutionCommand executionCommand = new ExecutionCommand();
+    executionCommand.setCommandParams(commandParams);
+    executionCommand.setClusterName("c1");
+
+    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
+    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
+
+    ComponentVersionCheckAction action = m_injector.getInstance(ComponentVersionCheckAction.class);
+    action.setExecutionCommand(executionCommand);
+    action.setHostRoleCommand(hostRoleCommand);
+
+    CommandReport report = action.execute(null);
+    assertNotNull(report);
+    assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
+
+  }
+
+  @Test
+  public void testMixedComponentVersions() throws Exception {
+    StackId sourceStack = HDP_21_STACK;
+    StackId targetStack = HDP_22_STACK;
+    String sourceRepo = HDP_2_1_1_0;
+    String targetRepo = HDP_2_2_1_0;
+
+    makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
+
+    Clusters clusters = m_injector.getInstance(Clusters.class);
+    Cluster cluster = clusters.getCluster("c1");
+
+    Service service = installService(cluster, "HDFS");
+    addServiceComponent(cluster, service, "NAMENODE");
+    addServiceComponent(cluster, service, "DATANODE");
+    createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
+    createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
+
+    // create some configs
+    createConfigs(cluster);
+
+    // setup the cluster for the upgrade across stacks
+    cluster.setCurrentStackVersion(sourceStack);
+    cluster.setDesiredStackVersion(targetStack);
+
+    // set the SCH versions to the new stack so that the finalize action is
+    // happy
+    cluster.getServiceComponentHosts("HDFS", "NAMENODE").get(0).setVersion(targetRepo);
+    // don't update DATANODE - we want to make the action complain
+
+    // inject an unhappy path where the cluster repo version is still UPGRADING
+    // even though all of the hosts are UPGRADED
+    ClusterVersionEntity upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion(
+        "c1", HDP_22_STACK, targetRepo);
+
+    upgradingClusterVersion.setState(RepositoryVersionState.UPGRADING);
+    upgradingClusterVersion = clusterVersionDAO.merge(upgradingClusterVersion);
+
+    // verify the conditions for the test are met properly
+    upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion("c1", HDP_22_STACK, targetRepo);
+    List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion("c1", HDP_22_STACK, targetRepo);
+
+    assertEquals(RepositoryVersionState.UPGRADING, upgradingClusterVersion.getState());
+    assertTrue(hostVersions.size() > 0);
+    for (HostVersionEntity hostVersion : hostVersions) {
+      assertEquals(RepositoryVersionState.UPGRADED, hostVersion.getState());
+    }
+
+    // now finalize and ensure we can transition from UPGRADING to UPGRADED
+    // automatically before CURRENT
+    Map<String, String> commandParams = new HashMap<String, String>();
+    commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
+    commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
+    commandParams.put(FinalizeUpgradeAction.ORIGINAL_STACK_KEY, sourceStack.getStackId());
+    commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());
+
+    ExecutionCommand executionCommand = new ExecutionCommand();
+    executionCommand.setCommandParams(commandParams);
+    executionCommand.setClusterName("c1");
+
+    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
+
+    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
+
+    ComponentVersionCheckAction action = m_injector.getInstance(ComponentVersionCheckAction.class);
+    action.setExecutionCommand(executionCommand);
+    action.setHostRoleCommand(hostRoleCommand);
+
+    CommandReport report = action.execute(null);
+    assertNotNull(report);
+    assertEquals(HostRoleStatus.HOLDING.name(), report.getStatus());
+
+
+
+  }
+
+  private ServiceComponentHost createNewServiceComponentHost(Cluster cluster, String svc,
+      String svcComponent, String hostName) throws AmbariException {
+    Assert.assertNotNull(cluster.getConfigGroups());
+    Service s = installService(cluster, svc);
+    ServiceComponent sc = addServiceComponent(cluster, s, svcComponent);
+
+    ServiceComponentHost sch = serviceComponentHostFactory.createNew(sc, hostName);
+
+    sc.addServiceComponentHost(sch);
+    sch.setDesiredState(State.INSTALLED);
+    sch.setState(State.INSTALLED);
+    sch.setDesiredStackVersion(cluster.getDesiredStackVersion());
+    sch.setStackVersion(cluster.getCurrentStackVersion());
+
+    sch.persist();
+    return sch;
+  }
+
+  private Service installService(Cluster cluster, String serviceName) throws AmbariException {
+    Service service = null;
+
+    try {
+      service = cluster.getService(serviceName);
+    } catch (ServiceNotFoundException e) {
+      service = serviceFactory.createNew(cluster, serviceName);
+      cluster.addService(service);
+      service.persist();
+    }
+
+    return service;
+  }
+
+  private ServiceComponent addServiceComponent(Cluster cluster, Service service,
+      String componentName) throws AmbariException {
+    ServiceComponent serviceComponent = null;
+    try {
+      serviceComponent = service.getServiceComponent(componentName);
+    } catch (ServiceComponentNotFoundException e) {
+      serviceComponent = serviceComponentFactory.createNew(service, componentName);
+      service.addServiceComponent(serviceComponent);
+      serviceComponent.setDesiredState(State.INSTALLED);
+      serviceComponent.persist();
+    }
+
+    return serviceComponent;
+  }
+
+  private void createConfigs(Cluster cluster) {
+    Map<String, String> properties = new HashMap<String, String>();
+    Map<String, Map<String, String>> propertiesAttributes = new HashMap<String, Map<String, String>>();
+    properties.put("a", "a1");
+    properties.put("b", "b1");
+
+    Config c1 = new ConfigImpl(cluster, "hdfs-site", properties, propertiesAttributes, m_injector);
+    properties.put("c", "c1");
+    properties.put("d", "d1");
+
+    Config c2 = new ConfigImpl(cluster, "core-site", properties, propertiesAttributes, m_injector);
+    Config c3 = new ConfigImpl(cluster, "foo-site", properties, propertiesAttributes, m_injector);
+
+    cluster.addConfig(c1);
+    cluster.addConfig(c2);
+    cluster.addConfig(c3);
+    c1.persist();
+    c2.persist();
+    c3.persist();
+  }
+}
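
Note (illustrative, not part of the patch): when ComponentVersionCheckAction finds mismatches it returns HOLDING and the structured output built by getErrors() has the shape below; the host, service, component and version values here are hypothetical examples taken from the test scenario, not actual output.

    {
      "hosts": ["h1"],
      "host_detail": {
        "h1": [
          {"service": "HDFS", "component": "DATANODE", "version": "2.1.1.0-1"}
        ]
      }
    }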
