http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/java/org/apache/ambari/server/state/stack/ConfigUpgradePackTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/ConfigUpgradePackTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/ConfigUpgradePackTest.java
new file mode 100644
index 0000000..388a81f
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/ConfigUpgradePackTest.java
@@ -0,0 +1,198 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.state.stack;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.persist.PersistService;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.state.stack.upgrade.*;
+import org.apache.ambari.server.state.stack.upgrade.ClusterGrouping.ExecuteStage;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.apache.ambari.server.state.stack.ConfigUpgradePack.AffectedService;
+import static org.apache.ambari.server.state.stack.ConfigUpgradePack.AffectedComponent;
+import static org.junit.Assert.*;
+
+/**
+ * Tests for the config upgrade pack
+ */
+public class ConfigUpgradePackTest {
+
+  private Injector injector;
+  private AmbariMetaInfo ambariMetaInfo;
+
+  @Before
+  public void before() throws Exception {
+    injector = Guice.createInjector(new InMemoryDefaultTestModule());
+    injector.getInstance(GuiceJpaInitializer.class);
+
+    ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+  }
+
+  @After
+  public void teardown() {
+    injector.getInstance(PersistService.class).stop();
+  }
+
+  @Test
+  public void testMerge() {
+    // Generate test data - 3 config upgrade packs, 2 services, 2 components, 2 config changes each
+    ArrayList<ConfigUpgradePack> cups = new ArrayList<>();
+    for (int cupIndex = 0; cupIndex < 3; cupIndex++) {
+
+      ArrayList<AffectedService> services = new ArrayList<>();
+      for (int serviceIndex = 0; serviceIndex < 2; serviceIndex++) {
+        String serviceName;
+        if (serviceIndex == 0) {
+          serviceName = "HDFS";  // For checking merge of existing services
+        } else {
+          serviceName = String.format("SOME_SERVICE_%s", cupIndex);
+        }
+        ArrayList<AffectedComponent> components = new ArrayList<>();
+        for (int componentIndex = 0; componentIndex < 2; componentIndex++) {
+          String componentName;
+          if (componentIndex == 0) {
+            componentName = "NAMENODE";  // For checking merge of existing components
+          } else {
+            componentName = "SOME_COMPONENT_" + cupIndex;
+          }
+
+          ArrayList<ConfigUpgradeChangeDefinition> changeDefinitions = new ArrayList<>();
+          for (int changeIndex = 0; changeIndex < 2; changeIndex++) {
+            String change_id = String.format(
+              "CHANGE_%s_%s_%s_%s", cupIndex, serviceIndex, componentIndex, changeIndex);
+            ConfigUpgradeChangeDefinition changeDefinition = new ConfigUpgradeChangeDefinition();
+            changeDefinition.id = change_id;
+            changeDefinitions.add(changeDefinition);
+          }
+          AffectedComponent component = new AffectedComponent();
+          component.name = componentName;
+          component.changes = changeDefinitions;
+          components.add(component);
+        }
+        AffectedService service = new AffectedService();
+        service.name = serviceName;
+        service.components = components;
+        services.add(service);
+      }
+      ConfigUpgradePack cupI = new ConfigUpgradePack();
+      cupI.services = services;
+      cups.add(cupI);
+    }
+
+    // Merge
+
+    ConfigUpgradePack result = ConfigUpgradePack.merge(cups);
+
+
+    // Check test results
+
+    assertEquals(result.enumerateConfigChangesByID().entrySet().size(), 24);
+
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("NAMENODE").changes.get(0).id, "CHANGE_0_0_0_0");
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("NAMENODE").changes.get(1).id, "CHANGE_0_0_0_1");
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("NAMENODE").changes.get(2).id, "CHANGE_1_0_0_0");
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("NAMENODE").changes.get(3).id, "CHANGE_1_0_0_1");
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("NAMENODE").changes.get(4).id, "CHANGE_2_0_0_0");
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("NAMENODE").changes.get(5).id, "CHANGE_2_0_0_1");
+
+
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("SOME_COMPONENT_0").changes.get(0).id, "CHANGE_0_0_1_0");
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("SOME_COMPONENT_0").changes.get(1).id, "CHANGE_0_0_1_1");
+
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("SOME_COMPONENT_1").changes.get(0).id, "CHANGE_1_0_1_0");
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("SOME_COMPONENT_1").changes.get(1).id, "CHANGE_1_0_1_1");
+
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("SOME_COMPONENT_2").changes.get(0).id, "CHANGE_2_0_1_0");
+    assertEquals(result.getServiceMap().get("HDFS").getComponentMap().get("SOME_COMPONENT_2").changes.get(1).id, "CHANGE_2_0_1_1");
+
+
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_0").getComponentMap().get("NAMENODE").changes.get(0).id, "CHANGE_0_1_0_0");
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_0").getComponentMap().get("NAMENODE").changes.get(1).id, "CHANGE_0_1_0_1");
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_0").getComponentMap().get("SOME_COMPONENT_0").changes.get(0).id, "CHANGE_0_1_1_0");
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_0").getComponentMap().get("SOME_COMPONENT_0").changes.get(1).id, "CHANGE_0_1_1_1");
+
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_1").getComponentMap().get("NAMENODE").changes.get(0).id, "CHANGE_1_1_0_0");
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_1").getComponentMap().get("NAMENODE").changes.get(1).id, "CHANGE_1_1_0_1");
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_1").getComponentMap().get("SOME_COMPONENT_1").changes.get(0).id, "CHANGE_1_1_1_0");
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_1").getComponentMap().get("SOME_COMPONENT_1").changes.get(1).id, "CHANGE_1_1_1_1");
+
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_2").getComponentMap().get("NAMENODE").changes.get(0).id, "CHANGE_2_1_0_0");
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_2").getComponentMap().get("NAMENODE").changes.get(1).id, "CHANGE_2_1_0_1");
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_2").getComponentMap().get("SOME_COMPONENT_2").changes.get(0).id, "CHANGE_2_1_1_0");
+    assertEquals(result.getServiceMap().get("SOME_SERVICE_2").getComponentMap().get("SOME_COMPONENT_2").changes.get(1).id, "CHANGE_2_1_1_1");
+
+  }
+
+  @Test
+  public void testConfigUpgradeDefinitionParsing() throws Exception {
+    ConfigUpgradePack cup = ambariMetaInfo.getConfigUpgradePack("HDP", "2.1.1");
+    Map<String, ConfigUpgradeChangeDefinition> changesByID = cup.enumerateConfigChangesByID();
+
+    ConfigUpgradeChangeDefinition hdp_2_1_1_nm_pre_upgrade = changesByID.get("hdp_2_1_1_nm_pre_upgrade");
+    assertEquals("core-site", hdp_2_1_1_nm_pre_upgrade.getConfigType());
+    assertEquals(4, hdp_2_1_1_nm_pre_upgrade.getTransfers().size());
+
+    /*
+    <transfer operation="COPY" from-key="copy-key" to-key="copy-key-to" />
+    <transfer operation="COPY" from-type="my-site" from-key="my-copy-key" to-key="my-copy-key-to" />
+    <transfer operation="MOVE" from-key="move-key" to-key="move-key-to" />
+    <transfer operation="DELETE" delete-key="delete-key">
+      <keep-key>important-key</keep-key>
+    </transfer>
+    */
+    ConfigUpgradeChangeDefinition.Transfer t1 = hdp_2_1_1_nm_pre_upgrade.getTransfers().get(0);
+    assertEquals(TransferOperation.COPY, t1.operation);
+    assertEquals("copy-key", t1.fromKey);
+    assertEquals("copy-key-to", t1.toKey);
+
+    ConfigUpgradeChangeDefinition.Transfer t2 = hdp_2_1_1_nm_pre_upgrade.getTransfers().get(1);
+    assertEquals(TransferOperation.COPY, t2.operation);
+    assertEquals("my-site", t2.fromType);
+    assertEquals("my-copy-key", t2.fromKey);
+    assertEquals("my-copy-key-to", t2.toKey);
+    assertTrue(t2.keepKeys.isEmpty());
+
+    ConfigUpgradeChangeDefinition.Transfer t3 = hdp_2_1_1_nm_pre_upgrade.getTransfers().get(2);
+    assertEquals(TransferOperation.MOVE, t3.operation);
+    assertEquals("move-key", t3.fromKey);
+    assertEquals("move-key-to", t3.toKey);
+
+    ConfigUpgradeChangeDefinition.Transfer t4 = hdp_2_1_1_nm_pre_upgrade.getTransfers().get(3);
+    assertEquals(TransferOperation.DELETE, t4.operation);
+    assertEquals("delete-key", t4.deleteKey);
+    assertNull(t4.toKey);
+    assertTrue(t4.preserveEdits);
+    assertEquals(1, t4.keepKeys.size());
+    assertEquals("important-key", t4.keepKeys.get(0));
+
+  }
+
+}
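
For orientation, the merge semantics exercised by testMerge above can be summarized in a short, self-contained sketch. SimplePack and its maps are simplified stand-ins, not the real ConfigUpgradePack API; only the behavior the assertions check is modeled: when packs are merged in order, changes for a service/component pair that already exists are appended after the existing ones, while unseen services and components are added as-is.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class MergeSketch {

  /** Simplified stand-in: service name -> component name -> change ids. */
  static class SimplePack {
    Map<String, Map<String, List<String>>> services = new LinkedHashMap<>();
  }

  /**
   * Merge packs in order: an already-present service/component pair has the
   * later pack's changes appended; new pairs are added. This mirrors the
   * ordering the test asserts, e.g. HDFS/NAMENODE accumulating
   * CHANGE_0_0_0_0, CHANGE_0_0_0_1, CHANGE_1_0_0_0, ... across three packs.
   */
  static SimplePack merge(List<SimplePack> packs) {
    SimplePack result = new SimplePack();
    for (SimplePack pack : packs) {
      for (Map.Entry<String, Map<String, List<String>>> svc : pack.services.entrySet()) {
        Map<String, List<String>> components =
            result.services.computeIfAbsent(svc.getKey(), k -> new LinkedHashMap<>());
        for (Map.Entry<String, List<String>> comp : svc.getValue().entrySet()) {
          components.computeIfAbsent(comp.getKey(), k -> new ArrayList<>())
                    .addAll(comp.getValue());
        }
      }
    }
    return result;
  }

  public static void main(String[] args) {
    SimplePack a = new SimplePack();
    a.services.computeIfAbsent("HDFS", k -> new LinkedHashMap<>())
              .computeIfAbsent("NAMENODE", k -> new ArrayList<>())
              .add("CHANGE_0_0_0_0");
    SimplePack b = new SimplePack();
    b.services.computeIfAbsent("HDFS", k -> new LinkedHashMap<>())
              .computeIfAbsent("NAMENODE", k -> new ArrayList<>())
              .add("CHANGE_1_0_0_0");
    // Prints {HDFS={NAMENODE=[CHANGE_0_0_0_0, CHANGE_1_0_0_0]}}: the second
    // pack's change lands after the first, as the test asserts.
    System.out.println(merge(Arrays.asList(a, b)).services);
  }
}
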
http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java index 9ae78c4..0cd734e 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java @@ -36,13 +36,17 @@ import org.apache.ambari.server.state.stack.UpgradePack.ProcessingComponent; import org.apache.ambari.server.state.stack.upgrade.ClusterGrouping; import org.apache.ambari.server.state.stack.upgrade.ClusterGrouping.ExecuteStage; import org.apache.ambari.server.state.stack.upgrade.ConfigureTask; -import org.apache.ambari.server.state.stack.upgrade.ConfigureTask.Transfer; +import org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Transfer; import org.apache.ambari.server.state.stack.upgrade.Direction; import org.apache.ambari.server.state.stack.upgrade.Grouping; +import org.apache.ambari.server.state.stack.upgrade.RestartGrouping; import org.apache.ambari.server.state.stack.upgrade.RestartTask; +import org.apache.ambari.server.state.stack.upgrade.StopGrouping; import org.apache.ambari.server.state.stack.upgrade.ServiceCheckGrouping; import org.apache.ambari.server.state.stack.upgrade.Task; import org.apache.ambari.server.state.stack.upgrade.TransferOperation; +import org.apache.ambari.server.state.stack.upgrade.UpdateStackGrouping; +import org.apache.ambari.server.state.stack.upgrade.UpgradeType; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -88,9 +92,8 @@ public class UpgradePackTest { Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); assertTrue(upgrades.size() > 0); assertTrue(upgrades.containsKey("upgrade_test")); - - UpgradePack up = upgrades.get("upgrade_test"); - assertEquals("2.2.*", up.getTarget()); + UpgradePack upgrade = upgrades.get("upgrade_test"); + assertEquals("2.2.*.*", upgrade.getTarget()); Map<String, List<String>> expectedStages = new LinkedHashMap<String, List<String>>() {{ put("ZOOKEEPER", Arrays.asList("ZOOKEEPER_SERVER")); @@ -100,24 +103,24 @@ public class UpgradePackTest { // !!! test the tasks int i = 0; for (Entry<String, List<String>> entry : expectedStages.entrySet()) { - assertTrue(up.getTasks().containsKey(entry.getKey())); - assertEquals(i++, indexOf(up.getTasks(), entry.getKey())); + assertTrue(upgrade.getTasks().containsKey(entry.getKey())); + assertEquals(i++, indexOf(upgrade.getTasks(), entry.getKey())); // check that the number of components matches - assertEquals(entry.getValue().size(), up.getTasks().get(entry.getKey()).size()); + assertEquals(entry.getValue().size(), upgrade.getTasks().get(entry.getKey()).size()); // check component ordering int j = 0; for (String comp : entry.getValue()) { - assertEquals(j++, indexOf(up.getTasks().get(entry.getKey()), comp)); + assertEquals(j++, indexOf(upgrade.getTasks().get(entry.getKey()), comp)); } } // !!! 
test specific tasks - assertTrue(up.getTasks().containsKey("HDFS")); - assertTrue(up.getTasks().get("HDFS").containsKey("NAMENODE")); + assertTrue(upgrade.getTasks().containsKey("HDFS")); + assertTrue(upgrade.getTasks().get("HDFS").containsKey("NAMENODE")); - ProcessingComponent pc = up.getTasks().get("HDFS").get("NAMENODE"); + ProcessingComponent pc = upgrade.getTasks().get("HDFS").get("NAMENODE"); assertNotNull(pc.preTasks); assertNotNull(pc.postTasks); assertNotNull(pc.tasks); @@ -129,17 +132,17 @@ public class UpgradePackTest { assertEquals(RestartTask.class, pc.tasks.get(0).getClass()); - assertTrue(up.getTasks().containsKey("ZOOKEEPER")); - assertTrue(up.getTasks().get("ZOOKEEPER").containsKey("ZOOKEEPER_SERVER")); + assertTrue(upgrade.getTasks().containsKey("ZOOKEEPER")); + assertTrue(upgrade.getTasks().get("ZOOKEEPER").containsKey("ZOOKEEPER_SERVER")); - pc = up.getTasks().get("HDFS").get("DATANODE"); + pc = upgrade.getTasks().get("HDFS").get("DATANODE"); assertNotNull(pc.preDowngradeTasks); assertEquals(0, pc.preDowngradeTasks.size()); assertNotNull(pc.postDowngradeTasks); assertEquals(1, pc.postDowngradeTasks.size()); - pc = up.getTasks().get("ZOOKEEPER").get("ZOOKEEPER_SERVER"); + pc = upgrade.getTasks().get("ZOOKEEPER").get("ZOOKEEPER_SERVER"); assertNotNull(pc.preTasks); assertEquals(1, pc.preTasks.size()); assertNotNull(pc.postTasks); @@ -147,56 +150,22 @@ public class UpgradePackTest { assertNotNull(pc.tasks); assertEquals(1, pc.tasks.size()); - pc = up.getTasks().get("YARN").get("NODEMANAGER"); + pc = upgrade.getTasks().get("YARN").get("NODEMANAGER"); assertNotNull(pc.preTasks); assertEquals(2, pc.preTasks.size()); Task t = pc.preTasks.get(1); assertEquals(ConfigureTask.class, t.getClass()); ConfigureTask ct = (ConfigureTask) t; - assertEquals("core-site", ct.getConfigType()); - assertEquals(4, ct.getTransfers().size()); - - /* - <transfer operation="COPY" from-key="copy-key" to-key="copy-key-to" /> - <transfer operation="COPY" from-type="my-site" from-key="my-copy-key" to-key="my-copy-key-to" /> - <transfer operation="MOVE" from-key="move-key" to-key="move-key-to" /> - <transfer operation="DELETE" delete-key="delete-key"> - <keep-key>important-key</keep-key> - </transfer> - */ - Transfer t1 = ct.getTransfers().get(0); - assertEquals(TransferOperation.COPY, t1.operation); - assertEquals("copy-key", t1.fromKey); - assertEquals("copy-key-to", t1.toKey); - - Transfer t2 = ct.getTransfers().get(1); - assertEquals(TransferOperation.COPY, t2.operation); - assertEquals("my-site", t2.fromType); - assertEquals("my-copy-key", t2.fromKey); - assertEquals("my-copy-key-to", t2.toKey); - assertTrue(t2.keepKeys.isEmpty()); - - Transfer t3 = ct.getTransfers().get(2); - assertEquals(TransferOperation.MOVE, t3.operation); - assertEquals("move-key", t3.fromKey); - assertEquals("move-key-to", t3.toKey); - - Transfer t4 = ct.getTransfers().get(3); - assertEquals(TransferOperation.DELETE, t4.operation); - assertEquals("delete-key", t4.deleteKey); - assertNull(t4.toKey); - assertTrue(t4.preserveEdits); - assertEquals(1, t4.keepKeys.size()); - assertEquals("important-key", t4.keepKeys.get(0)); + // check that the Configure task successfully parsed id + assertEquals("hdp_2_1_1_nm_pre_upgrade", ct.getId()); } @Test - public void testGroupOrders() { + public void testGroupOrdersForRolling() { Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); assertTrue(upgrades.size() > 0); assertTrue(upgrades.containsKey("upgrade_test_checks")); - - UpgradePack up = 
upgrades.get("upgrade_test_checks"); + UpgradePack upgrade = upgrades.get("upgrade_test_checks"); List<String> expected_up = Arrays.asList( "PRE_CLUSTER", @@ -219,7 +188,7 @@ public class UpgradePackTest { Grouping serviceCheckGroup = null; int i = 0; - List<Grouping> groups = up.getGroups(Direction.UPGRADE); + List<Grouping> groups = upgrade.getGroups(Direction.UPGRADE); for (Grouping g : groups) { assertEquals(expected_up.get(i), g.name); i++; @@ -245,7 +214,7 @@ public class UpgradePackTest { i = 0; - groups = up.getGroups(Direction.DOWNGRADE); + groups = upgrade.getGroups(Direction.DOWNGRADE); for (Grouping g : groups) { assertEquals(expected_down.get(i), g.name); i++; @@ -253,15 +222,44 @@ public class UpgradePackTest { } + // TODO AMBARI-12698, add the Downgrade case @Test - public void testDirection() throws Exception { + public void testGroupOrdersForNonRolling() { Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); assertTrue(upgrades.size() > 0); - assertTrue(upgrades.containsKey("upgrade_direction")); + assertTrue(upgrades.containsKey("upgrade_test_nonrolling")); + UpgradePack upgrade = upgrades.get("upgrade_test_nonrolling"); - UpgradePack up = upgrades.get("upgrade_direction"); + List<String> expected_up = Arrays.asList( + "PRE_CLUSTER", + "Stop High-Level Daemons", + "Backups", + "Stop Low-Level Daemons", + "UPDATE_DESIRED_STACK_ID", + "ALL_HOST_OPS", + "ZOOKEEPER", + "HDFS", + "MR and YARN", + "POST_CLUSTER"); - List<Grouping> groups = up.getGroups(Direction.UPGRADE); + int i = 0; + List<Grouping> groups = upgrade.getGroups(Direction.UPGRADE); + for (Grouping g : groups) { + assertEquals(expected_up.get(i), g.name); + i++; + } + } + + @Test + public void testDirectionForRolling() throws Exception { + Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); + assertTrue(upgrades.size() > 0); + assertTrue(upgrades.containsKey("upgrade_direction")); + + UpgradePack upgrade = upgrades.get("upgrade_direction"); + assertTrue(upgrade.getType() == UpgradeType.ROLLING); + + List<Grouping> groups = upgrade.getGroups(Direction.UPGRADE); assertEquals(4, groups.size()); Grouping group = groups.get(2); assertEquals(ClusterGrouping.class, group.getClass()); @@ -274,7 +272,7 @@ public class UpgradePackTest { assertNotNull(stages.get(0).intendedDirection); assertEquals(Direction.DOWNGRADE, stages.get(0).intendedDirection); - groups = up.getGroups(Direction.DOWNGRADE); + groups = upgrade.getGroups(Direction.DOWNGRADE); assertEquals(3, groups.size()); // there are two clustergroupings at the end group = groups.get(1); @@ -300,7 +298,75 @@ public class UpgradePackTest { Assert.assertTrue(upgradePack.isComponentFailureAutoSkipped()); Assert.assertTrue(upgradePack.isServiceCheckFailureAutoSkipped()); } + + @Test + public void testDirectionForNonRolling() throws Exception { + Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1"); + assertTrue(upgrades.size() > 0); + assertTrue(upgrades.containsKey("upgrade_test_nonrolling")); + + UpgradePack upgrade = upgrades.get("upgrade_test_nonrolling"); + assertTrue(upgrade.getType() == UpgradeType.NON_ROLLING); + List<Grouping> groups = upgrade.getGroups(Direction.UPGRADE); + assertEquals(10, groups.size()); + + Grouping group = null; + ClusterGrouping clusterGroup = null; + UpdateStackGrouping updateStackGroup = null; + StopGrouping stopGroup = null; + RestartGrouping restartGroup = null; + + group = groups.get(0); + assertEquals(ClusterGrouping.class, group.getClass()); 
+ clusterGroup = (ClusterGrouping) group; + assertEquals("Prepare Upgrade", clusterGroup.title); + + group = groups.get(1); + assertEquals(StopGrouping.class, group.getClass()); + stopGroup = (StopGrouping) group; + assertEquals("Stop Daemons for High-Level Services", stopGroup.title); + + group = groups.get(2); + assertEquals(ClusterGrouping.class, group.getClass()); + clusterGroup = (ClusterGrouping) group; + assertEquals("Take Backups", clusterGroup.title); + + group = groups.get(3); + assertEquals(StopGrouping.class, group.getClass()); + stopGroup = (StopGrouping) group; + assertEquals("Stop Daemons for Low-Level Services", stopGroup.title); + + group = groups.get(4); + assertEquals(UpdateStackGrouping.class, group.getClass()); + updateStackGroup = (UpdateStackGrouping) group; + assertEquals("Update Desired Stack Id", updateStackGroup.title); + + group = groups.get(5); + assertEquals(ClusterGrouping.class, group.getClass()); + clusterGroup = (ClusterGrouping) group; + assertEquals("Set Version On All Hosts", clusterGroup.title); + + group = groups.get(6); + assertEquals(RestartGrouping.class, group.getClass()); + restartGroup = (RestartGrouping) group; + assertEquals("Zookeeper", restartGroup.title); + + group = groups.get(7); + assertEquals(RestartGrouping.class, group.getClass()); + restartGroup = (RestartGrouping) group; + assertEquals("HDFS", restartGroup.title); + + group = groups.get(8); + assertEquals(RestartGrouping.class, group.getClass()); + restartGroup = (RestartGrouping) group; + assertEquals("MR and YARN", restartGroup.title); + + group = groups.get(9); + assertEquals(ClusterGrouping.class, group.getClass()); + clusterGroup = (ClusterGrouping) group; + assertEquals("Finalize {{direction.text.proper}}", clusterGroup.title); + } private int indexOf(Map<String, ?> map, String keyToFind) { int result = -1; @@ -315,6 +381,4 @@ public class UpgradePackTest { return result; } - - } http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java index e2a3995..bac00d4 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java @@ -43,7 +43,7 @@ public class StageWrapperBuilderTest { */ @Test public void testBuildOrder() throws Exception { - UpgradeContext upgradeContext = new UpgradeContext(null, null, null, null, Direction.UPGRADE); + UpgradeContext upgradeContext = new UpgradeContext(null, null, null, null, Direction.UPGRADE, UpgradeType.ROLLING); MockStageWrapperBuilder builder = new MockStageWrapperBuilder(null); List<StageWrapper> stageWrappers = builder.build(upgradeContext); List<Integer> invocationOrder = builder.getInvocationOrder(); @@ -64,7 +64,7 @@ public class StageWrapperBuilderTest { */ @Test public void testAutoSkipCheckInserted() throws Exception { - UpgradeContext upgradeContext = new UpgradeContext(null, null, null, null, Direction.UPGRADE); + UpgradeContext upgradeContext = new UpgradeContext(null, null, null, null, Direction.UPGRADE, UpgradeType.ROLLING); upgradeContext.setAutoSkipComponentFailures(true); 
upgradeContext.setAutoSkipServiceCheckFailures(true); http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java index 73b3a18..b3c8543 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java +++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java @@ -18,11 +18,15 @@ package org.apache.ambari.server.upgrade; +import static org.easymock.EasyMock.anyLong; +import static org.easymock.EasyMock.anyObject; import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.capture; import static org.easymock.EasyMock.createMockBuilder; import static org.easymock.EasyMock.createNiceMock; import static org.easymock.EasyMock.createStrictMock; import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.eq; import static org.easymock.EasyMock.expectLastCall; import static org.easymock.EasyMock.replay; import static org.easymock.EasyMock.reset; @@ -30,37 +34,64 @@ import static org.easymock.EasyMock.verify; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import javax.persistence.EntityManager; import com.google.inject.AbstractModule; +import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.api.services.AmbariMetaInfo; import org.apache.ambari.server.controller.AmbariManagementController; import org.apache.ambari.server.orm.DBAccessor; import org.apache.ambari.server.orm.GuiceJpaInitializer; import org.apache.ambari.server.orm.InMemoryDefaultTestModule; +import org.apache.ambari.server.orm.dao.ClusterDAO; +import org.apache.ambari.server.orm.dao.ClusterVersionDAO; +import org.apache.ambari.server.orm.dao.DaoUtils; +import org.apache.ambari.server.orm.dao.HostVersionDAO; +import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; import org.apache.ambari.server.orm.dao.StackDAO; +import org.apache.ambari.server.orm.entities.ClusterEntity; +import org.apache.ambari.server.orm.entities.ClusterVersionEntity; +import org.apache.ambari.server.orm.entities.HostEntity; +import org.apache.ambari.server.orm.entities.HostVersionEntity; +import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; import org.apache.ambari.server.orm.entities.StackEntity; +import org.apache.ambari.server.stack.StackManagerFactory; import org.apache.ambari.server.state.Cluster; import org.apache.ambari.server.state.Clusters; import org.apache.ambari.server.state.Config; import org.apache.ambari.server.state.ConfigHelper; +import org.apache.ambari.server.state.RepositoryVersionState; +import org.apache.ambari.server.state.StackId; +import org.apache.ambari.server.state.StackInfo; import org.apache.ambari.server.state.stack.OsFamily; +import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper; import org.easymock.EasyMock; import org.easymock.EasyMockSupport; +import org.easymock.Capture; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import java.lang.reflect.Field; +import java.sql.Connection; 
+import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + import com.google.inject.Binder; import com.google.inject.Guice; import com.google.inject.Injector; import com.google.inject.Module; import com.google.inject.Provider; import com.google.inject.persist.PersistService; +import org.apache.ambari.server.configuration.Configuration; /** * {@link org.apache.ambari.server.upgrade.UpgradeCatalog213} unit tests. @@ -71,6 +102,13 @@ public class UpgradeCatalog213Test { private EntityManager entityManager = createNiceMock(EntityManager.class); private UpgradeCatalogHelper upgradeCatalogHelper; private StackEntity desiredStackEntity; + private AmbariManagementController amc = createNiceMock(AmbariManagementController.class); + private AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class); + private StackDAO stackDAO = createNiceMock(StackDAO.class); + private RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class); + private ClusterVersionDAO clusterVersionDAO = createNiceMock(ClusterVersionDAO.class); + private HostVersionDAO hostVersionDAO = createNiceMock(HostVersionDAO.class); + private ClusterDAO clusterDAO = createNiceMock(ClusterDAO.class); @Before public void init() { @@ -94,7 +132,51 @@ public class UpgradeCatalog213Test { } @Test + public void testExecuteDDLUpdates() throws Exception { + final DBAccessor dbAccessor = createNiceMock(DBAccessor.class); + UpgradeCatalog213 upgradeCatalog = (UpgradeCatalog213) getUpgradeCatalog(dbAccessor); + + upgradeCatalog.executeDDLUpdates(); + } + + @Test public void testExecuteDMLUpdates() throws Exception { + // TODO AMBARI-13001, readd unit test section. + /* + final DBAccessor dbAccessor = createNiceMock(DBAccessor.class); + Configuration configuration = createNiceMock(Configuration.class); + Connection connection = createNiceMock(Connection.class); + Statement statement = createNiceMock(Statement.class); + ResultSet resultSet = createNiceMock(ResultSet.class); + expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes(); + dbAccessor.getConnection(); + expectLastCall().andReturn(connection).anyTimes(); + connection.createStatement(); + expectLastCall().andReturn(statement).anyTimes(); + statement.executeQuery(anyObject(String.class)); + expectLastCall().andReturn(resultSet).anyTimes(); + + // Technically, this is a DDL, but it has to be ran during the DML portion + // because it requires the persistence layer to be started. 
+ UpgradeSectionDDL upgradeSectionDDL = new UpgradeSectionDDL(); + + // Execute any DDL schema changes + upgradeSectionDDL.execute(dbAccessor); + + // Begin DML verifications + verifyBootstrapHDP21(); + + // Replay main sections + replay(dbAccessor, configuration, resultSet, connection, statement); + + + AbstractUpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor); + Class<?> c = AbstractUpgradeCatalog.class; + Field f = c.getDeclaredField("configuration"); + f.setAccessible(true); + f.set(upgradeCatalog, configuration); + */ + Method addMissingConfigs = UpgradeCatalog213.class.getDeclaredMethod("addMissingConfigs"); Method updateAMSConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateAMSConfigs"); Method updateAlertDefinitions = UpgradeCatalog213.class.getDeclaredMethod("updateAlertDefinitions"); @@ -117,8 +199,97 @@ public class UpgradeCatalog213Test { upgradeCatalog213.executeDMLUpdates(); verify(upgradeCatalog213); + + //verify(dbAccessor, configuration, resultSet, connection, statement); + + // Verify sections + //upgradeSectionDDL.verify(dbAccessor); + } + + /** + * Verify that when bootstrapping HDP 2.1, records get inserted into the + * repo_version, cluster_version, and host_version tables. + * @throws AmbariException + */ + private void verifyBootstrapHDP21() throws Exception, AmbariException { + final String stackName = "HDP"; + final String stackVersion = "2.1"; + final String stackNameAndVersion = stackName + "-" + stackVersion; + final String buildNumber = "2.1.0.0-0001"; + final String stackAndBuild = stackName + "-" + buildNumber; + final String clusterName = "c1"; + + expect(amc.getAmbariMetaInfo()).andReturn(metaInfo); + + // Mock the actions to bootstrap if using HDP 2.1 + Clusters clusters = createNiceMock(Clusters.class); + expect(amc.getClusters()).andReturn(clusters); + + Map<String, Cluster> clusterHashMap = new HashMap<String, Cluster>(); + Cluster cluster = createNiceMock(Cluster.class); + clusterHashMap.put(clusterName, cluster); + expect(clusters.getClusters()).andReturn(clusterHashMap); + + StackId stackId = new StackId(stackNameAndVersion); + expect(cluster.getCurrentStackVersion()).andReturn(stackId); + + StackInfo stackInfo = new StackInfo(); + stackInfo.setVersion(buildNumber); + expect(metaInfo.getStack(stackName, stackVersion)).andReturn(stackInfo); + + StackEntity stackEntity = createNiceMock(StackEntity.class); + expect(stackEntity.getStackName()).andReturn(stackName); + expect(stackEntity.getStackVersion()).andReturn(stackVersion); + + expect(stackDAO.find(stackName, stackVersion)).andReturn(stackEntity); + + replay(amc, metaInfo, clusters, cluster, stackEntity, stackDAO); + + // Mock more function calls + // Repository Version + RepositoryVersionEntity repositoryVersionEntity = createNiceMock(RepositoryVersionEntity.class); + expect(repositoryVersionDAO.findByDisplayName(stackAndBuild)).andReturn(null); + expect(repositoryVersionDAO.findMaxId("id")).andReturn(0L); + expect(repositoryVersionDAO.findAll()).andReturn(Collections.<RepositoryVersionEntity>emptyList()); + expect(repositoryVersionDAO.create(anyObject(StackEntity.class), anyObject(String.class), anyObject(String.class), anyObject(String.class))).andReturn(repositoryVersionEntity); + expect(repositoryVersionEntity.getId()).andReturn(1L); + expect(repositoryVersionEntity.getVersion()).andReturn(buildNumber); + replay(repositoryVersionDAO, repositoryVersionEntity); + + // Cluster Version + ClusterVersionEntity clusterVersionEntity = createNiceMock(ClusterVersionEntity.class); + 
expect(clusterVersionEntity.getId()).andReturn(1L); + expect(clusterVersionEntity.getState()).andReturn(RepositoryVersionState.CURRENT); + expect(clusterVersionEntity.getRepositoryVersion()).andReturn(repositoryVersionEntity); + + expect(clusterVersionDAO.findByClusterAndStackAndVersion(anyObject(String.class), anyObject(StackId.class), anyObject(String.class))).andReturn(null); + expect(clusterVersionDAO.findMaxId("id")).andReturn(0L); + expect(clusterVersionDAO.findAll()).andReturn(Collections.<ClusterVersionEntity>emptyList()); + expect(clusterVersionDAO.create(anyObject(ClusterEntity.class), anyObject(RepositoryVersionEntity.class), anyObject(RepositoryVersionState.class), anyLong(), anyLong(), anyObject(String.class))).andReturn(clusterVersionEntity); + replay(clusterVersionDAO, clusterVersionEntity); + + // Host Version + ClusterEntity clusterEntity = createNiceMock(ClusterEntity.class); + expect(clusterEntity.getClusterName()).andReturn(clusterName).anyTimes(); + expect(clusterDAO.findByName(anyObject(String.class))).andReturn(clusterEntity); + + Collection<HostEntity> hostEntities = new ArrayList<HostEntity>(); + HostEntity hostEntity1 = createNiceMock(HostEntity.class); + HostEntity hostEntity2 = createNiceMock(HostEntity.class); + expect(hostEntity1.getHostName()).andReturn("host1"); + expect(hostEntity2.getHostName()).andReturn("host2"); + hostEntities.add(hostEntity1); + hostEntities.add(hostEntity2); + expect(clusterEntity.getHostEntities()).andReturn(hostEntities); + + expect(hostVersionDAO.findByClusterStackVersionAndHost(anyObject(String.class), anyObject(StackId.class), anyObject(String.class), anyObject(String.class))).andReturn(null); + expect(hostVersionDAO.findMaxId("id")).andReturn(0L); + expect(hostVersionDAO.findAll()).andReturn(Collections.<HostVersionEntity>emptyList()); + + replay(clusterEntity, clusterDAO, hostVersionDAO, hostEntity1, hostEntity2); } + @Test public void testUpdateStormSiteConfigs() throws Exception { EasyMockSupport easyMockSupport = new EasyMockSupport(); @@ -292,9 +463,19 @@ public class UpgradeCatalog213Test { binder.bind(DBAccessor.class).toInstance(dbAccessor); binder.bind(EntityManager.class).toInstance(entityManager); binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class)); + binder.bind(DaoUtils.class).toInstance(createNiceMock(DaoUtils.class)); + binder.bind(ClusterDAO.class).toInstance(clusterDAO); + binder.bind(RepositoryVersionHelper.class).toInstance(createNiceMock(RepositoryVersionHelper.class)); + binder.bind(Clusters.class).toInstance(createNiceMock(Clusters.class)); + binder.bind(AmbariManagementController.class).toInstance(amc); + binder.bind(AmbariMetaInfo.class).toInstance(metaInfo); + binder.bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class)); + binder.bind(StackDAO.class).toInstance(stackDAO); + binder.bind(RepositoryVersionDAO.class).toInstance(repositoryVersionDAO); + binder.bind(ClusterVersionDAO.class).toInstance(clusterVersionDAO); + binder.bind(HostVersionDAO.class).toInstance(hostVersionDAO); } }; - Injector injector = Guice.createInjector(module); return injector.getInstance(UpgradeCatalog213.class); } @@ -313,4 +494,41 @@ public class UpgradeCatalog213Test { Assert.assertEquals("2.1.3", upgradeCatalog.getTargetVersion()); } + + // *********** Inner Classes that represent sections of the DDL *********** + // ************************************************************************ + + /** + * Verify that the upgrade table has two columns added to it. 
+ */ + class UpgradeSectionDDL implements SectionDDL { + + Capture<DBAccessor.DBColumnInfo> upgradeTablePackageNameColumnCapture = new Capture<DBAccessor.DBColumnInfo>(); + Capture<DBAccessor.DBColumnInfo> upgradeTableUpgradeTypeColumnCapture = new Capture<DBAccessor.DBColumnInfo>(); + + /** + * {@inheritDoc} + */ + @Override + public void execute(DBAccessor dbAccessor) throws SQLException { + // Add columns + dbAccessor.addColumn(eq("upgrade"), capture(upgradeTablePackageNameColumnCapture)); + dbAccessor.addColumn(eq("upgrade"), capture(upgradeTableUpgradeTypeColumnCapture)); + } + + /** + * {@inheritDoc} + */ + @Override + public void verify(DBAccessor dbAccessor) throws SQLException { + // Verification section + DBAccessor.DBColumnInfo packageNameCol = upgradeTablePackageNameColumnCapture.getValue(); + Assert.assertEquals(String.class, packageNameCol.getType()); + Assert.assertEquals("upgrade_package", packageNameCol.getName()); + + DBAccessor.DBColumnInfo upgradeTypeCol = upgradeTableUpgradeTypeColumnCapture.getValue(); + Assert.assertEquals(String.class, upgradeTypeCol.getType()); + Assert.assertEquals("upgrade_type", upgradeTypeCol.getName()); + } + } } \ No newline at end of file http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py index f96d8a7..04dd3bb 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py +++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py @@ -727,7 +727,7 @@ class TestHBaseMaster(RMFTestCase): def test_upgrade_backup(self): self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_upgrade.py", classname = "HbaseMasterUpgrade", - command = "snapshot", + command = "take_snapshot", config_file="hbase-preupgrade.json", hdp_stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES) http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.orig ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.orig b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.orig deleted file mode 100644 index 9dad8e1..0000000 --- a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.orig +++ /dev/null @@ -1,374 +0,0 @@ -{ - "configuration_attributes": { - "storm-site": {}, - "hdfs-site": { - "final": { - "dfs.support.append": "true", - "dfs.namenode.http-address": "true" - } - }, - "storm-env": {}, - "core-site": { - "final": { - "fs.defaultFS": "true" - } - }, - "hadoop-policy": {}, - "hdfs-log4j": {}, - "hadoop-env": {}, - "zookeeper-env": {}, - "zookeeper-log4j": {}, - "cluster-env": {} - }, - "commandParams": { - "command_timeout": "600", - "script": "scripts/nimbus.py", - "script_type": "PYTHON", - "service_package_folder": "HDP/2.1/services/STORM/package", - "hooks_folder": "HDP/2.0.6/hooks" - }, - "roleCommand": "START", - "clusterName": "pacan", - "hostname": "c6402.ambari.apache.org", - "hostLevelParams": { - "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", - "ambari_db_rca_password": "mapred", - "java_home": "/usr/jdk64/jdk1.7.0_45", - "ambari_db_rca_url": 
"jdbc:postgresql://c6401.ambari.apache.org/ambarirca", - "jce_name": "UnlimitedJCEPolicyJDK7.zip", - "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", - "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\"},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]", - "group_list": "[\"hadoop\",\"users\"]", - "package_list": "[{\"name\":\"storm_2_2_0_0_*\"}]", - "stack_version": "2.2", - "stack_name": "HDP", - "db_name": "ambari", - "ambari_db_rca_driver": "org.postgresql.Driver", - "jdk_name": "jdk-7u45-linux-x64.tar.gz", - "ambari_db_rca_username": "mapred", - "db_driver_filename": "mysql-connector-java.jar", - "user_list": "[\"ambari-qa\",\"zookeeper\",\"hdfs\",\"storm\"]", - "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar" - }, - "commandType": "EXECUTION_COMMAND", - "roleParams": {}, - "serviceName": "STORM", - "role": "NIMBUS", - "forceRefreshConfigTags": [], - "taskId": 54, - "public_hostname": "c6402.ambari.apache.org", - "configurations": { - "storm-site": { - "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer", - "topology.workers": "1", - "drpc.worker.threads": "64", - "storm.zookeeper.servers": "['c6402.ambari.apache.org']", - "supervisor.heartbeat.frequency.secs": "5", - "topology.executor.send.buffer.size": "1024", - "drpc.childopts": "-Xmx768m", - "nimbus.thrift.port": "6627", - "storm.zookeeper.retry.intervalceiling.millis": "30000", - "storm.local.dir": "/hadoop/storm", - "topology.receiver.buffer.size": "8", - "storm.messaging.netty.client_worker_threads": "1", - "transactional.zookeeper.root": "/transactional", - "topology.skip.missing.kryo.registrations": "false", - "worker.heartbeat.frequency.secs": "1", - "zmq.hwm": "0", - "storm.zookeeper.connection.timeout": "15000", - "_storm.thrift.secure.transport": "SECURED_TRANSPORT_CLASS", - "storm.messaging.netty.server_worker_threads": "1", - "supervisor.worker.start.timeout.secs": "120", - "zmq.threads": "1", - "topology.acker.executors": "null", - "storm.local.mode.zmq": "false", - "topology.max.task.parallelism": "null", - "topology.max.error.report.per.interval": "5", - "storm.zookeeper.port": "2181", - "drpc.queue.size": "128", - "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM", - "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM", - "storm.zookeeper.retry.times": "5", - "nimbus.monitor.freq.secs": "10", - 
"storm.cluster.mode": "distributed", - "dev.zookeeper.path": "/tmp/dev-storm-zookeeper", - "drpc.invocations.port": "3773", - "_storm.thrift.nonsecure.transport": "NON_SECURED_TRANSPORT_CLASS", - "storm.zookeeper.root": "/storm", - "logviewer.childopts": "-Xmx128m", - "transactional.zookeeper.port": "null", - "topology.worker.childopts": "null", - "topology.max.spout.pending": "1000", - "nimbus.cleanup.inbox.freq.secs": "600", - "storm.messaging.netty.min_wait_ms": "100", - "nimbus.task.timeout.secs": "30", - "nimbus.thrift.max_buffer_size": "1048576", - "topology.sleep.spout.wait.strategy.time.ms": "1", - "topology.optimize": "true", - "nimbus.reassign": "true", - "storm.messaging.transport": "backtype.storm.messaging.netty.Context", - "logviewer.appender.name": "A1", - "nimbus.host": "c6402.ambari.apache.org", - "ui.port": "8744", - "supervisor.slots.ports": "[6700, 6701]", - "nimbus.file.copy.expiration.secs": "600", - "supervisor.monitor.frequency.secs": "3", - "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER", - "transactional.zookeeper.servers": "null", - "zmq.linger.millis": "5000", - "topology.error.throttle.interval.secs": "10", - "topology.worker.shared.thread.pool.size": "4", - "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm/lib", - "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy", - "task.heartbeat.frequency.secs": "3", - "topology.transfer.buffer.size": "1024", - "storm.zookeeper.session.timeout": "20000", - "topology.executor.receive.buffer.size": "1024", - "topology.stats.sample.rate": "0.05", - "topology.fall.back.on.java.serialization": "true", - "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM", - "topology.enable.message.timeouts": "true", - "storm.messaging.netty.max_wait_ms": "1000", - "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator", - "nimbus.supervisor.timeout.secs": "60", - "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy", - "nimbus.inbox.jar.expiration.secs": "3600", - "drpc.port": "3772", - "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory", - "storm.zookeeper.retry.interval": "1000", - "storm.messaging.netty.max_retries": "30", - "topology.tick.tuple.freq.secs": "null", - "drpc.request.timeout.secs": "600", - "nimbus.task.launch.secs": "120", - "task.refresh.poll.secs": "10", - "topology.message.timeout.secs": "30", - "storm.messaging.netty.buffer_size": "5242880", - "topology.state.synchronization.timeout.secs": "60", - "supervisor.worker.timeout.secs": "30", - "topology.trident.batch.emit.interval.millis": "500", - "topology.builtin.metrics.bucket.size.secs": "60", - "logviewer.port": "8000", - "topology.debug": "false" - }, - "hdfs-site": { - "dfs.namenode.avoid.write.stale.datanode": "true", - "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}", - "dfs.namenode.checkpoint.txns": "1000000", - "dfs.block.access.token.enable": "true", - "dfs.support.append": "true", - "dfs.datanode.address": "0.0.0.0:1019", - "dfs.cluster.administrators": " hdfs", - 
"dfs.journalnode.kerberos.principal": "jn/_h...@example.com", - "dfs.datanode.balance.bandwidthPerSec": "6250000", - "dfs.namenode.safemode.threshold-pct": "1.0f", - "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", - "dfs.permissions.enabled": "true", - "dfs.namenode.kerberos.principal": "nn/_h...@example.com", - "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", - "dfs.https.port": "50470", - "dfs.namenode.https-address": "c6402.ambari.apache.org:50470", - "dfs.secondary.namenode.kerberos.https.principal": "HTTP/_h...@example.com", - "dfs.blockreport.initialDelay": "120", - "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal", - "dfs.blocksize": "134217728", - "dfs.client.read.shortcircuit": "true", - "dfs.datanode.max.transfer.threads": "1024", - "dfs.heartbeat.interval": "3", - "dfs.replication": "3", - "dfs.namenode.handler.count": "40", - "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", - "fs.permissions.umask-mode": "022", - "dfs.namenode.stale.datanode.interval": "30000", - "dfs.datanode.ipc.address": "0.0.0.0:8010", - "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", - "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}", - "dfs.datanode.data.dir": "/hadoop/hdfs/data", - "dfs.namenode.http-address": "c6402.ambari.apache.org:50070", - "dfs.webhdfs.enabled": "true", - "dfs.datanode.failed.volumes.tolerated": "0", - "dfs.namenode.accesstime.precision": "0", - "dfs.namenode.avoid.read.stale.datanode": "true", - "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", - "dfs.datanode.kerberos.principal": "dn/_h...@example.com", - "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab", - "dfs.datanode.http.address": "0.0.0.0:1022", - "dfs.datanode.du.reserved": "1073741824", - "dfs.client.read.shortcircuit.streams.cache.size": "4096", - "dfs.namenode.kerberos.https.principal": "HTTP/_h...@example.com", - "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", - "dfs.web.authentication.kerberos.principal": "HTTP/_h...@example.com", - "dfs.secondary.namenode.kerberos.principal": "nn/_h...@example.com", - "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", - "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", - "dfs.permissions.superusergroup": "hdfs", - "dfs.journalnode.http-address": "0.0.0.0:8480", - "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_h...@example.com", - "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", - "dfs.namenode.write.stale.datanode.ratio": "1.0f", - "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", - "dfs.datanode.data.dir.perm": "750", - "dfs.namenode.name.dir.restore": "true", - "dfs.replication.max": "50", - "dfs.namenode.checkpoint.period": "21600", - "dfs.http.policy": "HTTP_ONLY" - }, - "storm-env": { - "storm_log_dir": "/var/log/storm", - "storm_principal_name": "st...@example.com", - "storm_pid_dir": "/var/run/storm", - "storm_user": "storm", - "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"", - "nimbus_principal_name": "nimbus/_h...@example.com", - "storm_ui_keytab": "/etc/security/keytabs/http.storm.service.keytab", - "nimbus_keytab": "/etc/security/keytabs/nimbus.service.keytab", - "storm_keytab": "/etc/security/keytabs/storm.headless.keytab", - "storm_ui_principal_name": 
"HTTP/_HOST" - }, - "core-site": { - "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", - "fs.trash.interval": "360", - "hadoop.security.authentication": "kerberos", - "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", - "hadoop.proxyuser.falcon.hosts": "*", - "mapreduce.jobtracker.webinterface.trusted": "false", - "hadoop.security.authorization": "true", - "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020", - "ipc.server.tcpnodelay": "true", - "ipc.client.connect.max.retries": "50", - "ipc.client.idlethreshold": "8000", - "io.file.buffer.size": "131072", - "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](falcon@.*EXAMPLE.COM)s/.*/falcon/\nDEFAULT", - "ipc.client.connection.maxidletime": "30000", - "hadoop.proxyuser.falcon.groups": "users" - }, - "hadoop-policy": { - "security.job.client.protocol.acl": "*", - "security.job.task.protocol.acl": "*", - "security.datanode.protocol.acl": "*", - "security.namenode.protocol.acl": "*", - "security.client.datanode.protocol.acl": "*", - "security.inter.tracker.protocol.acl": "*", - "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", - "security.client.protocol.acl": "*", - "security.refresh.policy.protocol.acl": "hadoop", - "security.admin.operations.protocol.acl": "hadoop", - "security.inter.datanode.protocol.acl": "*" - }, - "hdfs-log4j": { - "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}:
%m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN" - }, - "hadoop-env": { - "namenode_opt_maxnewsize": "200m", - "hdfs_log_dir_prefix": "/var/log/hadoop", - "namenode_heapsize": "1024m", - "proxyuser_group": "users", - "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", - "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.
Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default.
This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64\n\n#Hadoop logging options\nexport HADOOP_ROOT_LOGGER={{hadoop_root_logger}}", - "hdfs_user": "hdfs", - "namenode_opt_newsize": "200m", - "namenode_opt_permsize" : "128m", - "namenode_opt_maxpermsize" : "256m", - "dtnode_heapsize": "1024m", - "hadoop_root_logger": "INFO,RFA", - "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "hdfs_principal_name": "hdfs" - }, - "zookeeper-env": { - "clientPort": "2181", - "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab", - "zk_user": "zookeeper", - "zk_log_dir": "/var/log/zookeeper", - "syncLimit": "5", - "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", - "initLimit": "10", - "zk_pid_dir": "/var/run/zookeeper", - "zk_data_dir": "/hadoop/zookeeper", - "zookeeper_principal_name": "zookeeper/_h...@example.com", - "tickTime": "2000" - }, - "zookeeper-log4j": { - "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n" - }, - "cluster-env": { - "security_enabled": "true", - "ignore_groupsusers_create": "false", - "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", - "kerberos_domain": "EXAMPLE.COM", - "kinit_path_local": "/usr/bin", - "user_group": "hadoop", - "smokeuser": "ambari-qa" - } - }, - "configurationTags": { - "storm-site": { - "tag": "version1412001710682" - }, - "hdfs-site": { - "tag": "version1412001710682" - }, - "storm-env": { - "tag": "version1412001710682" - }, - "core-site": { - "tag": "version1412001710682" - }, - "hadoop-policy": { - "tag": "version1411996371868" - }, - "hdfs-log4j": { - "tag": "version1411996371868" - }, - "hadoop-env": { - "tag": "version1412001710682" - }, - "zookeeper-env": { - "tag": "version1412001710682" - }, - "zookeeper-log4j": { - "tag": "version1" - }, - "cluster-env": { - "tag": "version1412001710681" - } - }, - "commandId": "12-1", - "clusterHostInfo": { - "snamenode_host": [ - "c6402.ambari.apache.org" - ], - "drpc_server_hosts": [ - "c6402.ambari.apache.org" - ], - "nimbus_hosts": [ - "c6402.ambari.apache.org" - ], - "all_ping_ports": [ - "8670" - ], - "all_hosts": [ - "c6402.ambari.apache.org" - ], - "slave_hosts": [ - "c6402.ambari.apache.org" - ], - "namenode_host": [ - "c6402.ambari.apache.org" - ], - "storm_ui_server_hosts": [ - "c6402.ambari.apache.org" - ], - "storm_rest_api_hosts": [ - "c6402.ambari.apache.org" - ], - "ambari_server_host": [ -
"c6401.ambari.apache.org" - ], - "zookeeper_hosts": [ - "c6402.ambari.apache.org" - ], - "supervisor_hosts": [ - "c6402.ambari.apache.org" - ] - } -} http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml new file mode 100644 index 0000000..1301f9d --- /dev/null +++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml @@ -0,0 +1,101 @@ +<?xml version="1.0"?> +<!-- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +--> + +<upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> + + <services> + <service name="ZOOKEEPER"> + <component name="ZOOKEEPER_SERVER"> + <changes> + <definition xsi:type="configure" id="hdp_2_1_1_zk_post_upgrade"> + </definition> + </changes> + </component> + </service> + + <service name="HDFS"> + <component name="NAMENODE"> + <changes> + <definition xsi:type="configure" id="hdp_2_1_1_nn_pre_upgrade"> + <type>hdfs-site</type> + <set key="myproperty" value="mynewvalue"/> + </definition> + </changes> + </component> + </service> + + <service name="YARN"> + <component name="NODEMANAGER"> + <changes> + <definition xsi:type="configure" id="hdp_2_1_1_nm_pre_upgrade"> + <type>core-site</type> + <transfer operation="copy" from-key="copy-key" + to-key="copy-key-to"/> + <transfer operation="copy" from-type="my-site" + from-key="my-copy-key" + to-key="my-copy-key-to"/> + <transfer operation="move" from-key="move-key" + to-key="move-key-to"/> + <transfer operation="delete" delete-key="delete-key" + preserve-edits="true"> + <keep-key>important-key</keep-key> + </transfer> + </definition> + </changes> + </component> + </service> + + <service name="HIVE"> + <component name="HIVE_SERVER"> + <changes> + <definition xsi:type="configure" id="hdp_2_1_1_set_transport_mode"> + <condition type="hive-site" key="hive.server2.transport.mode" value="binary"> + <type>hive-site</type> + <key>hive.server2.thrift.port</key> + <value>10010</value> + </condition> + <condition type="hive-site" key="hive.server2.transport.mode" value="http"> + <type>hive-site</type> + <key>hive.server2.http.port</key> + <value>10011</value> + </condition> + </definition> + + <definition xsi:type="configure" id="hdp_2_1_1_hive_server_foo"> + <type>hive-site</type> + <set key="fooKey" value="fooValue"/> + <set key="fooKey2" value="fooValue2"/> + <set key="fooKey3" value="fooValue3"/> + <transfer operation="copy" from-key="copy-key" to-key="copy-key-to" /> + <transfer operation="move" from-key="move-key" to-key="move-key-to" /> + <transfer 
operation="delete" delete-key="delete-key" /> + <transfer operation="delete" delete-key="delete-http" if-key="hive.server2.transport.mode" if-type="hive-site" if-value="http" /> + <transfer operation="delete" delete-key="delete-https-fail" if-key="hive.server2.transport.mode" if-type="hive-site" if-value="https" /> + <transfer operation="delete" delete-key="delete-prop-fail" if-key="non.existent" if-type="hive-site" if-value="https" /> + <transfer operation="delete" delete-key="delete-type-fail" if-key="non.existent" if-type="non.existent" if-value="" /> + <transfer operation="delete" delete-key="delete-null-if-value" if-key="non.existent" if-type="non.existent" /> + <transfer operation="delete" delete-key="delete-blank-if-key" if-key="" if-type="non.existent" /> + <transfer operation="delete" delete-key="delete-blank-if-type" if-key="non.existent" if-type="" /> + <transfer operation="delete" delete-key="delete-thrift" if-key="hive.server2.thrift.port" if-type="hive-site" if-value="10001" /> + </definition> + </changes> + </component> + </service> + </services> + +</upgrade-config-changes> http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_bucket_test.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_bucket_test.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_bucket_test.xml index 92e8c6a..0e6d914 100644 --- a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_bucket_test.xml +++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_bucket_test.xml @@ -16,8 +16,21 @@ limitations under the License. --> <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> - <target>2.2.*</target> - + <target>2.2.*.*</target> + <target-stack>HDP-2.2.6</target-stack> + <type>ROLLING</type> + <prerequisite-checks> + <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check> + <check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check> + <check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check> + <check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check> + <check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check> + <check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check> + <check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check> + <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check> + <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check> + <check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check> + </prerequisite-checks> <order> <group name="ZOOKEEPER" title="Zookeeper"> http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_direction.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_direction.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_direction.xml index 89a9e4f..e12fcd9 100644 --- a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_direction.xml +++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_direction.xml @@ -16,7 +16,9 @@ limitations under the License. 
--> <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> - <target>2.2.*</target> + <target>2.2.*.*</target> + <target-stack>HDP-2.2.5</target-stack> + <type>ROLLING</type> <order> <group name="ZOOKEEPER" title="Zookeeper"> @@ -75,7 +77,7 @@ </task> </pre-upgrade> <upgrade> - <task xsi:type="restart" /> + <task xsi:type="restart-task" /> </upgrade> <post-upgrade> <task xsi:type="configure" />
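
A note on the config-upgrade.xml fixture above: the if-key/if-type/if-value attributes on the <transfer> elements gate whether a transfer is applied at upgrade time. The fixture ids suggest the intended outcomes (delete-http should apply while delete-https-fail, delete-prop-fail, and delete-type-fail should be skipped because the current value does not match or the key/type does not exist). The following is a minimal, illustrative Java sketch of such a guard check written against plain maps; TransferGuard and isAllowed are hypothetical names for illustration, not Ambari API:

import java.util.Map;

/**
 * Illustrative sketch only (not Ambari code): evaluates an
 * if-key/if-type/if-value guard against a map of config types.
 */
public final class TransferGuard {

  /**
   * Returns true when a transfer should be applied: either no guard is set,
   * or the guarded config type exists, contains the key, and the key's
   * current value matches the expected one (compared case-insensitively here).
   */
  public static boolean isAllowed(Map<String, Map<String, String>> configs,
                                  String ifType, String ifKey, String ifValue) {
    if (isBlank(ifType) && isBlank(ifKey)) {
      return true;  // unconditional transfer, e.g. delete-key="delete-key"
    }
    if (isBlank(ifType) || isBlank(ifKey)) {
      return false; // half-specified guard, e.g. the delete-blank-if-* fixtures
    }
    Map<String, String> type = configs.get(ifType);
    if (type == null || !type.containsKey(ifKey)) {
      return false; // missing type or key, e.g. delete-type-fail / delete-prop-fail
    }
    String current = type.get(ifKey);
    return current != null && current.equalsIgnoreCase(ifValue);
  }

  private static boolean isBlank(String s) {
    return s == null || s.trim().isEmpty();
  }

  public static void main(String[] args) {
    Map<String, Map<String, String>> configs =
        Map.of("hive-site", Map.of("hive.server2.transport.mode", "http"));

    // Mirrors the fixture intent: delete-http applies, delete-https-fail does not.
    System.out.println(isAllowed(configs, "hive-site", "hive.server2.transport.mode", "http"));  // true
    System.out.println(isAllowed(configs, "hive-site", "hive.server2.transport.mode", "https")); // false
    System.out.println(isAllowed(configs, "hive-site", "non.existent", "https"));                // false
  }
}

Whether the real implementation compares case-insensitively, or how it treats a guard with a missing if-value (the delete-null-if-value fixture), is not established by this diff alone; the fixture ids only indicate which transfers are expected to apply or fail.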