Repository: ambari
Updated Branches:
  refs/heads/feature-branch-AMBARI-21307 ac71c44c5 -> b7f03cac8 (forced update)


AMBARI-22137 - Different stack versions should be able to link to different extension versions


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/03273bdc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/03273bdc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/03273bdc

Branch: refs/heads/feature-branch-AMBARI-21307
Commit: 03273bdce970a1282d89056aa48dc2ffe3b7b712
Parents: a3a8afc
Author: Tim Thorpe <ttho...@apache.org>
Authored: Tue Oct 10 13:54:01 2017 -0700
Committer: Tim Thorpe <ttho...@apache.org>
Committed: Tue Oct 10 13:54:01 2017 -0700

----------------------------------------------------------------------
 .../controller/AmbariManagementHelper.java      |   2 +-
 .../ambari/server/stack/ExtensionHelper.java    |  57 ++-
 .../apache/ambari/server/stack/StackModule.java |   4 +
 .../server/stack/StackManagerExtensionTest.java |  31 +-
 .../resources/extensions/EXT/0.2/metainfo.xml   |   2 +-
 .../resources/extensions/EXT/0.3/metainfo.xml   |   2 +-
 .../stacks_with_extensions/HDP/0.4/metainfo.xml |  22 ++
 .../HDP/0.4/repos/repoinfo.xml                  |  63 +++
 .../HDP/0.4/services/HBASE/metainfo.xml         |  26 ++
 .../0.4/services/HDFS/configuration/global.xml  | 145 +++++++
 .../services/HDFS/configuration/hadoop-env.xml  | 223 +++++++++++
 .../services/HDFS/configuration/hbase-site.xml  | 137 +++++++
 .../services/HDFS/configuration/hdfs-log4j.xml  | 199 ++++++++++
 .../services/HDFS/configuration/hdfs-site.xml   | 396 +++++++++++++++++++
 .../HDP/0.4/services/HDFS/metainfo.xml          |  30 ++
 .../0.4/services/HDFS/package/dummy-script.py   |  20 +
 .../HDP/0.4/services/HIVE/metainfo.xml          |  26 ++
 .../HDP/0.4/services/MAPREDUCE/metainfo.xml     |  23 ++
 .../HDP/0.4/services/ZOOKEEPER/metainfo.xml     |  26 ++
 19 files changed, 1425 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
index 2dd6f12..0c8edfe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
@@ -70,7 +70,7 @@ public class AmbariManagementHelper {
    */
   public void createExtensionLink(StackManager stackManager, StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
     validateCreateExtensionLinkRequest(stackInfo, extensionInfo);
-    ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
+    ExtensionHelper.validateCreateLink(stackManager, stackInfo, extensionInfo);
     ExtensionLinkEntity linkEntity = createExtensionLinkEntity(stackInfo, extensionInfo);
     stackManager.linkStackToExtension(stackInfo, extensionInfo);
 

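The only functional change here is threading the StackManager into link validation: duplicate-service detection now has to walk ancestor stack versions, and only the StackManager can resolve a stack by name and version. A minimal sketch of the resulting call order (the types below are simplified stand-ins, not Ambari's real classes; only the method names and their order mirror the diff):

public class CreateLinkFlow {

  static void createExtensionLink(Object stackManager, String stack, String extension) {
    validateCreateExtensionLinkRequest(stack, extension); // basic field checks (unchanged)
    validateCreateLink(stackManager, stack, extension);   // new: receives the StackManager
    System.out.println("create link entity, then linkStackToExtension(" + stack + ", " + extension + ")");
  }

  // Stub: the real method also checks supported stack versions and required extensions.
  static void validateCreateLink(Object stackManager, String stack, String extension) { }

  // Stub: the real method validates the request fields.
  static void validateCreateExtensionLinkRequest(String stack, String extension) { }

  public static void main(String[] args) {
    createExtensionLink(new Object(), "HDP/0.4", "EXT/0.3");
  }
}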
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
index 86e532a..91dc870 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
@@ -27,6 +27,8 @@ import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
 import org.apache.ambari.server.utils.VersionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An extension version is like a stack version but it contains custom services.  Linking an extension
@@ -35,6 +37,8 @@ import org.apache.ambari.server.utils.VersionUtils;
  */
 public class ExtensionHelper {
 
+  private static final Logger LOG = LoggerFactory.getLogger(ExtensionHelper.class);
+
   public static void validateDeleteLink(Clusters clusters, StackInfo stack, ExtensionInfo extension) throws AmbariException {
     validateNotRequiredExtension(stack, extension);
     validateServicesNotInstalled(clusters, stack, extension);
@@ -62,9 +66,9 @@ public class ExtensionHelper {
     }
   }
 
-  public static void validateCreateLink(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+  public static void validateCreateLink(StackManager stackManager, StackInfo stack, ExtensionInfo extension) throws AmbariException {
     validateSupportedStackVersion(stack, extension);
-    validateServiceDuplication(stack, extension);
+    validateServiceDuplication(stackManager, stack, extension);
     validateRequiredExtensions(stack, extension);
   }
 
@@ -88,15 +92,24 @@ public class ExtensionHelper {
     throw new AmbariException(message);
   }
 
-  private static void validateServiceDuplication(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+  private static void validateServiceDuplication(StackManager stackManager, StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    LOG.debug("Looking for duplicate services");
     for (ServiceInfo service : extension.getServices()) {
       if (service != null) {
+        LOG.debug("Looking for duplicate service {}", service.getName());
         ServiceInfo stackService = null;
         try {
           stackService = stack.getService(service.getName());
+          if (stackService != null) {
+            LOG.debug("Found service {}", service.getName());
+            if (isInheritedExtensionService(stackManager, stack, service.getName(), extension.getName())) {
+              stackService = null;
+            }
+          }
         }
         catch (Exception e) {
-          //Eat the exception
+          // Log and swallow the exception; a failed lookup simply means no duplicate here
+          LOG.error("Error validating service duplication", e);
         }
         if (stackService != null) {
           String message = "Existing service is included in extension"
@@ -112,6 +125,44 @@ public class ExtensionHelper {
     }
   }
 
+  private static boolean isInheritedExtensionService(StackManager stackManager, StackInfo stack, String serviceName, String extensionName) {
+    // If the service comes from an extension linked at the current stack level, it isn't inherited from the parent stack version
+    if (isExtensionService(stack, serviceName, extensionName)) {
+      LOG.debug("Service is at requested stack/version level {}", serviceName);
+      return false;
+    }
+
+    return isExtensionService(stackManager, stack.getName(), stack.getParentStackVersion(), serviceName, extensionName);
+  }
+
+  private static boolean isExtensionService(StackManager stackManager, String stackName, String stackVersion, String serviceName, String extensionName) {
+    LOG.debug("Checking at stack/version {}/{}", stackName, stackVersion);
+    StackInfo stack = stackManager.getStack(stackName, stackVersion);
+
+    if (stack == null) {
+      LOG.warn("Stack/version not found {}/{}", stackName, stackVersion);
+      return false;
+    }
+
+    if (isExtensionService(stack, serviceName, extensionName)) {
+      LOG.debug("Stack/version {}/{} contains service {}", stackName, stackVersion, serviceName);
+      return true;
+    }
+    else {
+      return isExtensionService(stackManager, stackName, stack.getParentStackVersion(), serviceName, extensionName);
+    }
+  }
+
+  private static boolean isExtensionService(StackInfo stack, String serviceName, String extensionName) {
+    ExtensionInfo extension = stack.getExtension(extensionName);
+    if (extension == null) {
+      LOG.debug("Extension not found {}", extensionName);
+      return false;
+    }
+
+    return extension.getService(serviceName) != null;
+  }
+
   private static void validateRequiredExtensions(StackInfo stack, ExtensionInfo extension) throws AmbariException {
     for (ExtensionMetainfoXml.Extension requiredExtension : extension.getExtensions()) {
       if (requiredExtension != null) {

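Taken together, the three helpers above make a duplicate fatal only when the service genuinely clashes, not when the "duplicate" is the same extension's service inherited from an ancestor stack version. A self-contained toy model of the walk, using plain maps in place of Ambari's StackManager/StackInfo (everything below is illustrative, not the real API):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class InheritedExtensionServiceCheck {

  // stack "NAME/VERSION" -> parent version (null at the root of the hierarchy)
  static Map<String, String> parent = new HashMap<String, String>();
  // stack "NAME/VERSION" -> extension name -> services that extension contributes there
  static Map<String, Map<String, Set<String>>> links = new HashMap<String, Map<String, Set<String>>>();

  // Mirrors isExtensionService(StackManager, ...): climb parent versions until some
  // ancestor links the extension and contributes the service, or the chain runs out.
  static boolean isExtensionService(String name, String version, String service, String ext) {
    if (version == null || !parent.containsKey(name + "/" + version)) {
      return false; // stack/version not found
    }
    Map<String, Set<String>> here = links.get(name + "/" + version);
    if (here != null && here.containsKey(ext) && here.get(ext).contains(service)) {
      return true;
    }
    return isExtensionService(name, parent.get(name + "/" + version), service, ext);
  }

  // Mirrors isInheritedExtensionService: a hit at the requested level is NOT inheritance.
  static boolean isInherited(String name, String version, String service, String ext) {
    Map<String, Set<String>> here = links.get(name + "/" + version);
    if (here != null && here.containsKey(ext) && here.get(ext).contains(service)) {
      return false;
    }
    return isExtensionService(name, parent.get(name + "/" + version), service, ext);
  }

  public static void main(String[] args) {
    // The HDP 0.1 -> 0.4 chain from this patch's test fixtures, with EXT linked at 0.3
    parent.put("HDP/0.1", null);
    parent.put("HDP/0.2", "0.1");
    parent.put("HDP/0.3", "0.2");
    parent.put("HDP/0.4", "0.3");
    links.put("HDP/0.3", Collections.singletonMap("EXT", Collections.singleton("OOZIE2")));

    // HDP 0.4 sees OOZIE2 through its parent 0.3, so linking EXT there is not a duplication error:
    System.out.println(isInherited("HDP", "0.4", "OOZIE2", "EXT")); // true
    // At 0.3 itself the service is local, not inherited:
    System.out.println(isInherited("HDP", "0.3", "OOZIE2", "EXT")); // false
  }
}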
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 520764d..b109331 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -198,6 +198,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     if (parentVersion != null) {
       mergeStackWithParent(parentVersion, allStacks, commonServices, extensions);
     }
+
     for (ExtensionInfo extension : stackInfo.getExtensions()) {
       String extensionKey = extension.getName() + StackManager.PATH_DELIMITER + extension.getVersion();
       ExtensionModule extensionModule = extensions.get(extensionKey);
@@ -402,6 +403,9 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
 
   private void addExtensionServices() throws AmbariException {
     for (ExtensionModule extension : extensionModules.values()) {
+      for (Map.Entry<String, ServiceModule> entry : extension.getServiceModules().entrySet()) {
+        serviceModules.put(entry.getKey(), entry.getValue());
+      }
       stackInfo.addExtension(extension.getModuleInfo());
     }
   }

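The new loop in addExtensionServices() folds each linked extension's service modules into the stack's own serviceModules map, so extension services participate in the normal stack machinery; in particular, that appears to be what lets a child stack (HDP 0.4 in the tests below) inherit a service such as OOZIE2 that its parent got from an extension. A small illustration of the merge, with java.util maps standing in for the module classes (the string values are placeholders):

import java.util.HashMap;
import java.util.Map;

public class ExtensionServiceMerge {
  public static void main(String[] args) {
    // Stand-in for the stack's Map<String, ServiceModule>
    Map<String, String> serviceModules = new HashMap<String, String>();
    serviceModules.put("HDFS", "stack-defined HDFS");

    // Stand-in for one linked extension's getServiceModules()
    Map<String, String> extensionServiceModules = new HashMap<String, String>();
    extensionServiceModules.put("OOZIE2", "extension-defined OOZIE2");

    // Mirrors the added loop: extension services become first-class stack services
    for (Map.Entry<String, String> entry : extensionServiceModules.entrySet()) {
      serviceModules.put(entry.getKey(), entry.getValue());
    }

    System.out.println(serviceModules); // contains both HDFS and OOZIE2 (iteration order unspecified)
  }
}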
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
index 20c8f40..cef30b5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
@@ -69,6 +69,9 @@ public class StackManagerExtensionTest  {
     StackEntity stack3 = new StackEntity();
     stack3.setStackName("HDP");
     stack3.setStackVersion("0.3");
+    StackEntity stack4 = new StackEntity();
+    stack4.setStackName("HDP");
+    stack4.setStackVersion("0.4");
     ExtensionEntity extension1 = new ExtensionEntity();
     extension1.setExtensionName("EXT");
     extension1.setExtensionVersion("0.1");
@@ -78,19 +81,28 @@ public class StackManagerExtensionTest  {
     ExtensionEntity extension3 = new ExtensionEntity();
     extension3.setExtensionName("EXT");
     extension3.setExtensionVersion("0.3");
+    ExtensionLinkEntity link1 = new ExtensionLinkEntity();
+    link1.setLinkId(Long.valueOf(-1));
+    link1.setStack(stack1);
+    link1.setExtension(extension1);
     List<ExtensionLinkEntity> list = new ArrayList<>();
+    List<ExtensionLinkEntity> linkList = new ArrayList<>();
+    linkList.add(link1);
 
     expect(stackDao.find("HDP", "0.1")).andReturn(stack1).atLeastOnce();
     expect(stackDao.find("HDP", "0.2")).andReturn(stack2).atLeastOnce();
     expect(stackDao.find("HDP", "0.3")).andReturn(stack3).atLeastOnce();
+    expect(stackDao.find("HDP", "0.4")).andReturn(stack3).atLeastOnce();
     expect(extensionDao.find("EXT", 
"0.1")).andReturn(extension1).atLeastOnce();
     expect(extensionDao.find("EXT", 
"0.2")).andReturn(extension2).atLeastOnce();
     expect(extensionDao.find("EXT", 
"0.3")).andReturn(extension3).atLeastOnce();
 
+    expect(linkDao.findByStack("HDP", 
"0.1")).andReturn(linkList).atLeastOnce();
     expect(linkDao.findByStack(EasyMock.anyObject(String.class),
             EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
 
     expect(linkDao.findByStackAndExtension("HDP", "0.2", "EXT", 
"0.2")).andReturn(null).atLeastOnce();
+    expect(linkDao.findByStackAndExtension("HDP", "0.1", "EXT", 
"0.1")).andReturn(link1).atLeastOnce();
 
     replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, 
linkDao); //linkEntity
 
@@ -144,21 +156,34 @@ public class StackManagerExtensionTest  {
     assertNotNull(themes);
     assertTrue("Number of themes is " + themes.size(), themes.size() == 0);
 
-    StackInfo stack = stackManager.getStack("HDP", "0.2");
+    StackInfo stack = stackManager.getStack("HDP", "0.1");
     assertNotNull(stack.getService("OOZIE2"));
     oozie = stack.getService("OOZIE2");
     assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
     assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
-    assertEquals(oozie.getVersion(), "4.0.0");
+    assertEquals(oozie.getVersion(), "3.2.0");

     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
-    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.1");
+
+    stack = stackManager.getStack("HDP", "0.2");
+    assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 0);

     stack = stackManager.getStack("HDP", "0.3");
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
+    assertNotNull(extension.getService("OOZIE2"));
+    oozie = extension.getService("OOZIE2");
+    assertEquals(oozie.getVersion(), "4.0.0");
+
+    assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+
+    stack = stackManager.getStack("HDP", "0.4");
+    assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
+    extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
     assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
   }

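One detail in the mock setup above is load-bearing: the specific linkDao.findByStack("HDP", "0.1") expectation is recorded before the anyObject(...) catch-all, and EasyMock serves a call from the first recorded expectation whose matchers fit, so "HDP"/"0.1" gets linkList while every other stack/version falls through to the empty list. A standalone illustration of the pattern (the LinkDao interface is invented for the example):

import static org.easymock.EasyMock.anyObject;
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class MockOrderingDemo {

  // Hypothetical DAO, shaped like linkDao.findByStack in the test above.
  interface LinkDao {
    List<String> findByStack(String stackName, String stackVersion);
  }

  public static void main(String[] args) {
    LinkDao linkDao = createNiceMock(LinkDao.class);

    // Specific expectation first...
    expect(linkDao.findByStack("HDP", "0.1")).andReturn(Arrays.asList("link1")).atLeastOnce();
    // ...then the catch-all for every other stack/version.
    expect(linkDao.findByStack(anyObject(String.class), anyObject(String.class)))
        .andReturn(Collections.<String>emptyList()).atLeastOnce();
    replay(linkDao);

    System.out.println(linkDao.findByStack("HDP", "0.1")); // [link1]
    System.out.println(linkDao.findByStack("HDP", "0.2")); // []
  }
}

Recorded the other way around, the catch-all would swallow the "HDP"/"0.1" call as well.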
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
index 0d37b3e..c95a20f 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
@@ -25,7 +25,7 @@
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.2</version>
+        <version>0.3</version>
       </stack>
     </min-stack-versions>
   </prerequisites>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
index d827314..1b6ce73 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
@@ -25,7 +25,7 @@
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.2</version>
+        <version>0.3</version>
       </stack>
     </min-stack-versions>
   </prerequisites>

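Both EXT 0.2 and EXT 0.3 now declare a minimum stack version of HDP 0.3, so HDP 0.1 can only link EXT 0.1 while HDP 0.3 and 0.4 can take the newer extension versions, which is exactly the per-stack-version linking this change enables. The gate is validateSupportedStackVersion, called from validateCreateLink earlier in this commit; Ambari's real comparison lives in VersionUtils, approximated here with plain numeric dot-segments (a sketch, not the production logic):

public class MinStackVersionCheck {

  // Simplified stand-in for the VersionUtils comparison: compare dotted
  // numeric versions segment by segment, treating missing segments as 0.
  static int compare(String v1, String v2) {
    String[] a = v1.split("\\."), b = v2.split("\\.");
    for (int i = 0; i < Math.max(a.length, b.length); i++) {
      int x = i < a.length ? Integer.parseInt(a[i]) : 0;
      int y = i < b.length ? Integer.parseInt(b[i]) : 0;
      if (x != y) return Integer.compare(x, y);
    }
    return 0;
  }

  public static void main(String[] args) {
    // A link is allowed when stackVersion >= the extension's min-stack-version:
    System.out.println(compare("0.2", "0.3") >= 0); // false - HDP 0.2 may no longer link EXT 0.2/0.3
    System.out.println(compare("0.3", "0.3") >= 0); // true  - HDP 0.3 may link them
    System.out.println(compare("0.4", "0.3") >= 0); // true  - HDP 0.4 may link them
  }
}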
http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
new file mode 100644
index 0000000..3b4897f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+         <upgrade>0.3</upgrade>
+    </versions>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
new file mode 100644
index 0000000..9b3b1c7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+  <os family="centos5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..48123f0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <extends>common-services/HBASE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
new file mode 100644
index 0000000..bcab577
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>dfs_name_dir</name>
+    <value>/hadoop/hdfs/namenode</value>
+    <description>NameNode Directories.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Secondary NameNode checkpoint dir.</description>
+  </property>
+  <property>
+    <name>datanode_hosts</name>
+    <value></value>
+    <description>List of Datanode Hosts.</description>
+  </property>
+  <property>
+    <name>dfs_data_dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Data directories for Data Nodes.</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>dfs_webhdfs_enabled</name>
+    <value>true</value>
+    <description>WebHDFS enabled</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>640</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>datanode_du_reserved</name>
+    <value>1</value>
+    <description>Reserved space for HDFS</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>dfs_datanode_failed_volume_tolerated</name>
+    <value>0</value>
+    <description>DataNode volumes failure toleration</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_period</name>
+    <value>21600</value>
+    <description>HDFS Maximum Checkpoint Delay</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_size</name>
+    <value>0.5</value>
+    <description>FS Checkpoint Size.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>keytab_path</name>
+    <value>/etc/security/keytabs</value>
+    <description>KeyTab Directory.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..8fb8c7f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User and Groups.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignore failures on user and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Proxy user group.</description>
+  </property>
+
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS 
-Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} 
-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console 
${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 
-XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC 
-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS 
${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export 
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
new file mode 100644
index 0000000..5024e85
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers
+    Same property is used by the HMaster for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..649472d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,199 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..2b979d7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>#of failed disks dn would tolerate</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem an DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in terms of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+    <description>The address and port on which the NameNode web UI listens.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much
+    space free for non-DFS use.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for the first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>The number of server threads for the namenode. Raised to
+    grow the queue size so that more client connections are allowed.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.xcievers</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>dfs.umaskmode</name>
+    <value>077</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.ugi</name>
+    <!-- cluster variant -->
+    <value>gopher,gopher</value>
+    <description>The user account used by the web interface.
+    Syntax: USERNAME,GROUP1,GROUP2, ...
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.supergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>ipc.server.max.response.size</name>
+    <value>5242880</value>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+      Kerberos principal name for the NameNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+      Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the NameNode runs
+    on.</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary
+    NameNode runs on.</description>
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+    <description>
+      The Kerberos principal that the DataNode runs as. "_HOST" is replaced
+      by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      Combined keytab file containing the namenode service and host
+      principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      Combined keytab file containing the namenode service and host
+      principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+    <description>
+      The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>The https port where namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+    <description>The https address where namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <description>The permissions that should be set on dfs.data.dir
+    directories. The datanode will not come up if the permissions are
+    different on existing dfs.data.dir directories. If the directories
+    don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+    <name>dfs.access.time.precision</name>
+    <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+      The default value is 1 hour. Setting a value of 0 disables
+      access times for HDFS.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for the users and groups who can view the default
+    servlets in HDFS.</description>
+  </property>
+
+  <property>
+    <name>ipc.server.read.threadpool.size</name>
+    <value>5</value>
+    <description>The number of threads the RPC server uses to read from its
+    sockets.</description>
+  </property>
+
+</configuration>
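
Note: each of these test configuration files is a flat list of <property>
name/value pairs, so a quick sanity check for duplicate or empty keys can
catch copy-paste mistakes before they surface as confusing test failures
(Hadoop-style loaders let a later duplicate silently win). A minimal sketch
using only the Python standard library; the path argument is illustrative:

import xml.etree.ElementTree as ET

def load_hadoop_config(path):
    """Parse a Hadoop-style configuration file into a dict, rejecting duplicates."""
    props = {}
    for prop in ET.parse(path).getroot().findall("property"):
        name = prop.findtext("name")
        if name in props:
            raise ValueError("duplicate property: %s" % name)
        props[name] = prop.findtext("value", default="")
    return props

# Example: load_hadoop_config("hdfs-site.xml").get("dfs.replication") -> "3"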

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..da61660
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <extends>common-services/HDFS/1.0</extends>
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
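
Note: the <extends>common-services/HDFS/1.0</extends> element is what drives
service inheritance when the stack is parsed. For readers writing similar
fixtures, a rough sketch of pulling the extends target and config-type
dependencies out of a metainfo.xml; this helper is illustrative, not an
Ambari API:

import xml.etree.ElementTree as ET

def read_metainfo(path):
    """List each service's name, extends target, and config-type dependencies."""
    root = ET.parse(path).getroot()
    return [{
        "name": svc.findtext("name"),
        "extends": svc.findtext("extends"),
        "config-types": [ct.text for ct in svc.iter("config-type")],
    } for svc in root.iter("service")]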

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..9c122b2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <extends>common-services/HIVE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
new file mode 100644
index 0000000..3b0b3d9
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <extends>common-services/MAPREDUCE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/03273bdc/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..9c8a299
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <extends>common-services/ZOOKEEPER/1.0</extends>
+    </service>
+  </services>
+</metainfo>
