http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/LocalAmbariServer.java
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/LocalAmbariServer.java b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/LocalAmbariServer.java
new file mode 100644
index 0000000..24b4ccf
--- /dev/null
+++ b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/LocalAmbariServer.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.funtest.server.tests;
+
+import com.google.inject.Inject;
+import com.google.inject.persist.PersistService;
+import org.apache.ambari.server.controller.AmbariServer;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import com.google.inject.Injector;
+
+/**
+ * Wraps AmbariServer as a testable unit.
+ */
+public class LocalAmbariServer implements Runnable {
+
+  private static final Log LOG = LogFactory.getLog(LocalAmbariServer.class);
+
+  /**
+   * Actual ambari server instance.
+   */
+  private AmbariServer ambariServer = null;
+
+  @Inject
+  private Injector injector;
+
+  public LocalAmbariServer() {}
+
+  /**
+   * Thread entry point.
+   */
+  @Override
+  public void run(){
+    try {
+      startServer();
+    }
+    catch (Exception ex) {
+      LOG.info("Exception received ", ex);
+      throw new RuntimeException(ex);
+    }
+  }
+
+  /**
+   * Configures the Guice injector to use the in-memory test DB
+   * and attempts to start an instance of AmbariServer.
+   *
+   * @throws Exception
+   */
+  private void startServer() throws Exception {
+    try {
+      LOG.info("Attempting to start ambari server...");
+
+      AmbariServer.setupProxyAuth();
+      injector.getInstance(GuiceJpaInitializer.class);
+      ambariServer = injector.getInstance(AmbariServer.class);
+      ambariServer.initViewRegistry();
+      ambariServer.run();
+    } catch (InterruptedException ex) {
+      LOG.info(ex);
+    } catch (Throwable t) {
+      LOG.error("Failed to run the Ambari Server", t);
+      stopServer();
+      throw t;
+    }
+  }
+
+  /**
+   * Attempts to stop the test AmbariServer instance.
+   * @throws Exception
+   */
+  public void stopServer() throws Exception {
+    LOG.info("Stopping ambari server...");
+
+    if (ambariServer != null) {
+      ambariServer.stop();
+    }
+
+    if (injector != null) {
+        injector.getInstance(PersistService.class).stop();
+    }
+  }
+}
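
A minimal usage sketch (it only uses names introduced in this patch; ServerTestBase below does the same thing for real):

    // Sketch only: build the injector from the in-memory test module,
    // run the wrapper on its own thread, then shut it down.
    Injector injector = Guice.createInjector(new InMemoryDefaultTestModule());
    LocalAmbariServer server = injector.getInstance(LocalAmbariServer.class);
    Thread serverThread = new Thread(server); // run() delegates to startServer()
    serverThread.start();
    // ... issue REST calls against the local server ...
    server.stopServer();                      // stops AmbariServer and the PersistService
    serverThread.interrupt();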

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/ServerTestBase.java
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/ServerTestBase.java b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/ServerTestBase.java
new file mode 100644
index 0000000..b915977
--- /dev/null
+++ b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/ServerTestBase.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.funtest.server.tests;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.funtest.server.orm.InMemoryDefaultTestModule;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.httpclient.HttpClient;
+import org.apache.commons.httpclient.methods.GetMethod;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.Properties;
+
+/**
+ * Base test infrastructure.
+ */
+public class ServerTestBase {
+    /**
+     * Run the ambari server on a thread.
+     */
+    protected Thread serverThread = null;
+
+    /**
+     * Instance of the local ambari server, which wraps the actual
+     * ambari server with test configuration.
+     */
+    protected LocalAmbariServer server = null;
+
+    /**
+     * Server port
+     */
+    protected static int serverPort = 9995;
+
+    /**
+     * Server agent port
+     */
+    protected static int serverAgentPort = 9000;
+
+    /**
+     * Guice injector using an in-memory DB.
+     */
+    protected Injector injector = null;
+
+    /**
+     * Server URL
+     */
+    protected static String SERVER_URL_FORMAT = "http://localhost:%d";
+
+    /**
+     * Start our local server on a thread so that it does not block.
+     *
+     * @throws Exception
+     */
+    @Before
+    public void setup() throws Exception {
+        InMemoryDefaultTestModule testModule = new InMemoryDefaultTestModule();
+        Properties properties = testModule.getProperties();
+        properties.setProperty(Configuration.AGENT_USE_SSL, "false");
+        properties.setProperty(Configuration.CLIENT_API_PORT_KEY, Integer.toString(serverPort));
+        properties.setProperty(Configuration.SRVR_ONE_WAY_SSL_PORT_KEY, Integer.toString(serverAgentPort));
+        String tmpDir = System.getProperty("java.io.tmpdir");
+        testModule.getProperties().setProperty(Configuration.SRVR_KSTR_DIR_KEY, tmpDir);
+        injector = Guice.createInjector(testModule);
+        server = injector.getInstance(LocalAmbariServer.class);
+        serverThread = new Thread(server);
+        serverThread.start();
+        waitForServer();
+    }
+
+    private String getUserName() {
+        return "admin";
+    }
+
+    private String getPassword() {
+        return "admin";
+    }
+
+    protected String getBasicAuthentication() {
+        String authString = getUserName() + ":" + getPassword();
+        byte[] authEncBytes = Base64.encodeBase64(authString.getBytes());
+        String authStringEnc = new String(authEncBytes);
+
+        return "Basic " + authStringEnc;
+    }
+
+    /**
+     * Waits for the local server until it is ready to accept requests.
+     *
+     * @throws Exception
+     */
+    private void waitForServer() throws Exception {
+        int count = 1;
+
+        while (!isServerUp()) {
+            serverThread.join(count * 10000); // Give the ambari server a few seconds to start up
+            //count += 1; // progressive back off
+            //count *= 2; // exponential back off
+        }
+    }
+
+    /**
+     * Attempt to query the server for the stack. If the server is up,
+     * we will get a response. If not, an exception will be thrown.
+     *
+     * @return - True if the local server is responsive to queries.
+     *           False, otherwise.
+     */
+    private boolean isServerUp() {
+        String apiPath = "/api/v1/stacks";
+
+        String apiUrl = String.format(SERVER_URL_FORMAT, serverPort) + apiPath;
+        HttpClient httpClient = new HttpClient();
+        GetMethod getMethod = new GetMethod(apiUrl);
+
+        try {
+            getMethod.addRequestHeader("Authorization", getBasicAuthentication());
+            getMethod.addRequestHeader("X-Requested-By", "ambari");
+            httpClient.executeMethod(getMethod);
+
+            return true; // Any response means the server is accepting connections.
+        } catch (IOException ex) {
+            // Server is not up yet; fall through and report false.
+        } finally {
+            getMethod.releaseConnection();
+        }
+
+        return false;
+    }
+
+    /**
+     * Shut down the local server.
+     *
+     * @throws Exception
+     */
+    @After
+    public void teardown() throws Exception {
+        if (serverThread != null) {
+            serverThread.interrupt();
+        }
+        if (server != null) {
+            server.stopServer();
+        }
+    }
+}
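
For reference, getBasicAuthentication() above yields a standard HTTP Basic header; with the test credentials it evaluates to:

    // Base64("admin:admin") == "YWRtaW46YWRtaW4="
    String header = getBasicAuthentication(); // "Basic YWRtaW46YWRtaW4="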

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/StartStopServerTest.java
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/StartStopServerTest.java b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/StartStopServerTest.java
new file mode 100644
index 0000000..37b1123
--- /dev/null
+++ b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/tests/StartStopServerTest.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.funtest.server.tests;
+
+import org.apache.commons.codec.binary.Base64;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+
+import org.apache.commons.httpclient.HttpClient;
+import org.apache.commons.httpclient.methods.GetMethod;
+
+import java.io.StringReader;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonParser;
+import com.google.gson.stream.JsonReader;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonArray;
+
+import java.io.IOException;
+
+import org.apache.http.HttpStatus;
+
+/**
+ * Simple test that starts the local ambari server,
+ * tests its status, and shuts down the server.
+ */
+public class StartStopServerTest extends ServerTestBase {
+  /**
+   * Waits for the ambari server to start up and then checks its
+   * status by querying /api/v1/stacks (does not touch the DB).
+   */
+  @Test
+  public void testServerStatus() throws IOException {
+    /**
+     * Query the ambari server for the list of stacks.
+     * A successful GET returns the list of stacks.
+     * We should get a json like:
+     * {
+     *   "href" : "http://localhost:9995/api/v1/stacks";,
+     *   "items" : [
+     *   {
+     *     "href" : "http://localhost:9995/api/v1/stacks/HDP";,
+     *     "Stacks" : {
+     *     "stack_name" : "HDP"
+     *     }
+     *   }
+     *  ]
+     * }
+     */
+
+    /**
+     * Test URL for GETting the status of the ambari server
+     */
+    String stacksPath = "/api/v1/stacks";
+    String stacksUrl = String.format(SERVER_URL_FORMAT, serverPort) + stacksPath;
+    HttpClient httpClient = new HttpClient();
+    GetMethod getMethod = new GetMethod(stacksUrl);
+
+    try {
+      getMethod.addRequestHeader("Authorization", getBasicAuthentication());
+      getMethod.addRequestHeader("X-Requested-By", "ambari");
+      int statusCode = httpClient.executeMethod(getMethod);
+
+      assertEquals(HttpStatus.SC_OK, statusCode); // HTTP status code 200
+
+      String responseBody = getMethod.getResponseBodyAsString();
+
+      assertTrue(responseBody != null); // Make sure response body is valid
+
+      JsonElement jsonElement = new JsonParser().parse(new JsonReader(new StringReader(responseBody)));
+
+      assertTrue(jsonElement != null); // Response was a JSON string
+
+      JsonObject jsonObject = jsonElement.getAsJsonObject();
+
+      assertTrue(jsonObject.has("items")); // Should have "items" entry
+
+      JsonArray stacksArray = jsonObject.get("items").getAsJsonArray();
+
+      assertTrue(stacksArray.size() > 0); // Should have at least one stack
+
+    } finally {
+      getMethod.releaseConnection();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/ClusterUtils.java
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/ClusterUtils.java b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/ClusterUtils.java
new file mode 100644
index 0000000..9799892
--- /dev/null
+++ b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/ClusterUtils.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.funtest.server.utils;
+
+
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import org.apache.ambari.funtest.server.ClusterConfigParams;
+import org.apache.ambari.funtest.server.ConnectionParams;
+import org.apache.ambari.funtest.server.WebRequest;
+import org.apache.ambari.funtest.server.WebResponse;
+import org.apache.ambari.funtest.server.api.cluster.AddDesiredConfigurationWebRequest;
+import org.apache.ambari.funtest.server.api.cluster.CreateClusterWebRequest;
+import org.apache.ambari.funtest.server.api.cluster.CreateConfigurationWebRequest;
+import org.apache.ambari.funtest.server.api.cluster.GetRequestStatusWebRequest;
+import org.apache.ambari.funtest.server.api.host.AddHostWebRequest;
+import org.apache.ambari.funtest.server.api.host.RegisterHostWebRequest;
+import org.apache.ambari.funtest.server.api.service.AddServiceWebRequest;
+import org.apache.ambari.funtest.server.api.service.InstallServiceWebRequest;
+import org.apache.ambari.funtest.server.api.servicecomponent.AddServiceComponentWebRequest;
+import org.apache.ambari.funtest.server.api.servicecomponenthost.BulkAddServiceComponentHostsWebRequest;
+import org.apache.ambari.funtest.server.api.servicecomponenthost.BulkSetServiceComponentHostStateWebRequest;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.State;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+public class ClusterUtils {
+
+    @Inject
+    private Injector injector;
+
+    public void createSampleCluster(ConnectionParams serverParams) throws Exception {
+        WebResponse response = null;
+        JsonElement jsonResponse;
+        String clusterName = "c1";
+        String hostName = "host1";
+        String clusterVersion = "HDP-2.2.0";
+
+        /**
+         * Register a host
+         */
+        if (injector == null) {
+            jsonResponse = RestApiUtils.executeRequest(new RegisterHostWebRequest(serverParams, hostName));
+        }
+        else {
+            /**
+             * Hack: Until we figure out how to get the agent servlet going,
+             * register a host directly using the Clusters class.
+             */
+            Clusters clusters = injector.getInstance(Clusters.class);
+            clusters.addHost(hostName);
+            Host host1 = clusters.getHost(hostName);
+            Map<String, String> hostAttributes = new HashMap<String, String>();
+            hostAttributes.put("os_family", "redhat");
+            hostAttributes.put("os_release_version", "6.3");
+            host1.setHostAttributes(hostAttributes);
+            host1.persist();
+        }
+
+        /**
+         * Create a cluster
+         */
+        jsonResponse = RestApiUtils.executeRequest(new CreateClusterWebRequest(serverParams, clusterName, clusterVersion));
+
+        /**
+         * Add the registered host to the new cluster
+         */
+        jsonResponse = RestApiUtils.executeRequest(new AddHostWebRequest(serverParams, clusterName, hostName));
+
+        /**
+         * Create and add a configuration to our cluster
+         */
+
+        String configType = "test-hadoop-env";
+        String configTag = "version1";
+        ClusterConfigParams configParams = new ClusterConfigParams();
+        configParams.setClusterName(clusterName);
+        configParams.setConfigType(configType);
+        configParams.setConfigTag(configTag);
+        configParams.setProperties(new HashMap<String, String>() {{
+            put("fs.default.name", "localhost:9995");
+        }});
+
+        jsonResponse = RestApiUtils.executeRequest(new CreateConfigurationWebRequest(serverParams, configParams));
+
+        /**
+         * Apply the desired configuration to our cluster
+         */
+        jsonResponse = RestApiUtils.executeRequest(new AddDesiredConfigurationWebRequest(serverParams, configParams));
+
+        /**
+         * Add a service to the cluster
+         */
+
+        String serviceName = "HDFS";
+        jsonResponse = RestApiUtils.executeRequest(new AddServiceWebRequest(serverParams, clusterName, serviceName));
+
+        String[] componentNames = new String[] {"NAMENODE", "DATANODE", "SECONDARY_NAMENODE"};
+
+        /**
+         * Add components to the service
+         */
+        for (String componentName : componentNames) {
+            jsonResponse = RestApiUtils.executeRequest(new AddServiceComponentWebRequest(serverParams, clusterName,
+                    serviceName, componentName));
+        }
+
+        /**
+         * Install the service
+         */
+        jsonResponse = RestApiUtils.executeRequest(new InstallServiceWebRequest(serverParams, clusterName, serviceName));
+
+        /**
+         * Add components to the host
+         */
+
+        jsonResponse = RestApiUtils.executeRequest(new BulkAddServiceComponentHostsWebRequest(serverParams, clusterName,
+                Arrays.asList(hostName), Arrays.asList(componentNames)));
+
+        /**
+         * Install the service component hosts
+         */
+        jsonResponse = RestApiUtils.executeRequest(new BulkSetServiceComponentHostStateWebRequest(serverParams,
+                    clusterName, State.INIT, State.INSTALLED));
+        int requestId = parseRequestId(jsonResponse);
+        RequestStatusPoller.poll(serverParams, clusterName, requestId);
+
+        /**
+         * Start the service component hosts
+         */
+
+        jsonResponse = RestApiUtils.executeRequest(new BulkSetServiceComponentHostStateWebRequest(serverParams,
+                clusterName, State.INSTALLED, State.STARTED));
+        requestId = parseRequestId(jsonResponse);
+        RequestStatusPoller.poll(serverParams, clusterName, requestId);
+
+        /**
+         * Start the service
+         */
+        //jsonResponse = RestApiUtils.executeRequest(new StartServiceWebRequest(serverParams, clusterName, serviceName));
+    }
+
+    /**
+     * Parses a JSON response string for { "Requests" : { "id" : "2" } }
+     *
+     * @param jsonResponse
+     * @return - request id
+     * @throws IllegalArgumentException
+     */
+    private static int parseRequestId(JsonElement jsonResponse) throws IllegalArgumentException {
+        if (jsonResponse.isJsonNull()) {
+            throw new IllegalArgumentException("jsonResponse with request id expected.");
+        }
+
+        JsonObject jsonObject = jsonResponse.getAsJsonObject();
+        int requestId = jsonObject.get("Requests").getAsJsonObject().get("id").getAsInt();
+        return requestId;
+    }
+}
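
A hypothetical call site for the utility (the ConnectionParams setter names below are illustrative assumptions, not confirmed by this patch):

    // Sketch only: point ClusterUtils at the local test server.
    ConnectionParams serverParams = new ConnectionParams();
    serverParams.setServerName("localhost");   // assumed setter
    serverParams.setServerApiPort(serverPort); // assumed setter
    serverParams.setUserName("admin");         // assumed setter
    serverParams.setPassword("admin");         // assumed setter
    new ClusterUtils().createSampleCluster(serverParams);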

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/RequestStatusPoller.java
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/RequestStatusPoller.java b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/RequestStatusPoller.java
new file mode 100644
index 0000000..034eae2
--- /dev/null
+++ b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/RequestStatusPoller.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.funtest.server.utils;
+
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import org.apache.ambari.funtest.server.ConnectionParams;
+import org.apache.ambari.funtest.server.WebRequest;
+import org.apache.ambari.funtest.server.api.cluster.GetRequestStatusWebRequest;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+
+/**
+ * Polls the status of a service component host request.
+ */
+class RequestStatusPoller implements Runnable {
+    private HostRoleStatus hostRoleStatus;
+    private ConnectionParams serverParams;
+    private String clusterName;
+    private int requestId;
+
+    public RequestStatusPoller(ConnectionParams serverParams, String clusterName, int requestId) {
+        this.hostRoleStatus = HostRoleStatus.IN_PROGRESS;
+        this.serverParams = serverParams;
+        this.clusterName = clusterName;
+        this.requestId = requestId;
+    }
+
+    public HostRoleStatus getHostRoleStatus() {
+        return this.hostRoleStatus;
+    }
+
+    public static boolean poll(ConnectionParams serverParams, String clusterName, int requestId) throws Exception {
+        RequestStatusPoller poller = new RequestStatusPoller(serverParams, clusterName, requestId);
+        Thread pollerThread = new Thread(poller);
+        pollerThread.start();
+        pollerThread.join();
+        if (poller.getHostRoleStatus() == HostRoleStatus.COMPLETED)
+            return true;
+
+        return false;
+    }
+
+    @Override
+    public void run() {
+        int retryCount = 5;
+        while (true) {
+            JsonElement jsonResponse;
+
+            try {
+                WebRequest webRequest = new GetRequestStatusWebRequest(serverParams, clusterName, requestId);
+                jsonResponse = RestApiUtils.executeRequest(webRequest);
+            } catch (Exception ex) {
+                throw new RuntimeException(ex);
+            }
+            if (!jsonResponse.isJsonNull()) {
+                JsonObject jsonObj = jsonResponse.getAsJsonObject();
+                JsonObject jsonRequestsObj = jsonObj.getAsJsonObject("Requests");
+                String requestStatus = jsonRequestsObj.get("request_status").getAsString();
+                hostRoleStatus = HostRoleStatus.valueOf(requestStatus);
+
+                if (hostRoleStatus == HostRoleStatus.COMPLETED ||
+                        hostRoleStatus == HostRoleStatus.ABORTED ||
+                        hostRoleStatus == HostRoleStatus.TIMEDOUT)
+                    break;
+            }
+
+            // Bound the number of polls even when the response is empty JSON.
+            if (retryCount == 0)
+                break;
+
+            try {
+                Thread.sleep(5000);
+            } catch (InterruptedException ex) {
+                break;
+            }
+
+            retryCount--;
+        }
+    }
+}
\ No newline at end of file
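
Note that poll() joins the poller thread before returning, so it is effectively synchronous; the worker thread mainly isolates the sleep/interrupt handling. ClusterUtils above calls it like this:

    // Blocks until the request completes, aborts, times out, or retries run out.
    boolean completed = RequestStatusPoller.poll(serverParams, clusterName, requestId);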

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/RestApiUtils.java
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/RestApiUtils.java b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/RestApiUtils.java
new file mode 100644
index 0000000..65e1f7c
--- /dev/null
+++ b/ambari-funtest/src/test/java/org/apache/ambari/funtest/server/utils/RestApiUtils.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.funtest.server.utils;
+
+import com.google.gson.JsonElement;
+import com.google.gson.JsonParser;
+import com.google.gson.stream.JsonReader;
+import org.apache.ambari.funtest.server.WebRequest;
+import org.apache.ambari.funtest.server.WebResponse;
+import org.apache.commons.httpclient.HttpStatus;
+
+import java.io.StringReader;
+
+/**
+ * Helper that executes a web request object and evaluates the response code.
+ */
+public class RestApiUtils {
+    /**
+     * Executes a web request and throws an exception if the status code is incorrect.
+     *
+     * @param request
+     * @return
+     * @throws Exception
+     */
+    public static JsonElement executeRequest(WebRequest request) throws Exception {
+        WebResponse response = request.getResponse();
+        int responseCode = response.getStatusCode();
+        String responseBody = response.getContent();
+
+        if (responseCode != HttpStatus.SC_OK && responseCode != HttpStatus.SC_CREATED && responseCode != HttpStatus.SC_ACCEPTED) {
+            throw new RuntimeException(String.format("%d:%s", responseCode, responseBody));
+        }
+        }
+
+        return new JsonParser().parse(new JsonReader(new StringReader(responseBody)));
+    }
+}
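
Any of the WebRequest subclasses added in this change set can be run through the helper; for example, creating a cluster as ClusterUtils does:

    // Throws on any status other than 200/201/202.
    JsonElement json = RestApiUtils.executeRequest(
            new CreateClusterWebRequest(serverParams, "c1", "HDP-2.2.0"));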

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/resources/log4j.properties b/ambari-funtest/src/test/resources/log4j.properties
new file mode 100644
index 0000000..f6767d3
--- /dev/null
+++ b/ambari-funtest/src/test/resources/log4j.properties
@@ -0,0 +1,21 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=DEBUG,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n
+
+log4j.logger.org.apache.ambari=DEBUG

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/resources/os_family.json
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/resources/os_family.json b/ambari-funtest/src/test/resources/os_family.json
new file mode 100644
index 0000000..df55b61
--- /dev/null
+++ b/ambari-funtest/src/test/resources/os_family.json
@@ -0,0 +1,45 @@
+{
+  "redhat": {
+    "distro": [
+      "redhat",
+      "fedora",
+      "centos",
+      "oraclelinux"
+    ],
+    "versions": [
+      5,
+      6
+    ]
+  },
+  "ubuntu": {
+    "distro": [
+      "ubuntu",
+      "debian"
+    ],
+    "versions": [
+      12
+    ]
+  },
+  "suse": {
+    "distro": [
+      "sles",
+      "sled",
+      "opensuse",
+      "suse"
+    ],
+    "versions": [
+      11
+    ]
+  },
+  "winsrv": {
+    "distro": [
+      "win2008server",
+      "win2008serverr2",
+      "win2012server",
+      "win2012serverr2"
+    ],
+    "versions": [
+      6
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/resources/stacks/HDP/0.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/resources/stacks/HDP/0.1/metainfo.xml b/ambari-funtest/src/test/resources/stacks/HDP/0.1/metainfo.xml
new file mode 100644
index 0000000..768f110
--- /dev/null
+++ b/ambari-funtest/src/test/resources/stacks/HDP/0.1/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+         <upgrade>0.0</upgrade>
+    </versions>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/resources/stacks/HDP/0.1/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/resources/stacks/HDP/0.1/repos/repoinfo.xml b/ambari-funtest/src/test/resources/stacks/HDP/0.1/repos/repoinfo.xml
new file mode 100644
index 0000000..1ea1809
--- /dev/null
+++ b/ambari-funtest/src/test/resources/stacks/HDP/0.1/repos/repoinfo.xml
@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="centos6, redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+  <os family="centos5, redhat5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hbase-site.xml b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hbase-site.xml
new file mode 100644
index 0000000..5024e85
--- /dev/null
+++ b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hbase-site.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers
+    Same property is used by the HMaster for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hdfs-site.xml b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..995f6c1
--- /dev/null
+++ b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>#of failed disks dn would tolerate</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem an DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in term of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The address and the base port where the dfs namenode
+web ui will listen on.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+    <description>
+        Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+ <description>
+        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+ <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+  <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+ <description>
+        The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+  <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+  <name>dfs.access.time.precision</name>
+  <value>0</value>
+  <description>The access time for HDFS file is precise up to this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+  </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+  <name>ipc.server.read.threadpool.size</name>
+  <value>5</value>
+  <description></description>
+</property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..f4195a8
--- /dev/null
+++ b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/HDFS/metainfo.xml
@@ -0,0 +1,155 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <comment>This is comment for HDFS service</comment>
+      <version>1.0</version>
+
+      <components>
+        <component>
+          <name>NAMENODE</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>SECONDARY_NAMENODE</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>DATANODE</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>DATANODE1</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>DATANODE2</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HDFS_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>lzo</name>
+            </package>
+            <package>
+              <name>hadoop</name>
+            </package>
+            <package>
+              <name>hadoop-libhdfs</name>
+            </package>
+            <package>
+              <name>hadoop-native</name>
+            </package>
+            <package>
+              <name>hadoop-pipes</name>
+            </package>
+            <package>
+              <name>hadoop-sbin</name>
+            </package>
+            <package>
+              <name>hadoop-lzo</name>
+            </package>
+            <package>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
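
For readers unfamiliar with the stack format: each <component> above binds a
component name and category to a command script. Ambari parses these files
with its own JAXB models; purely as an illustration, here is a plain-JDK
sketch that lists the components from a local copy of this metainfo.xml (the
file path is hypothetical):

    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    public class ListStackComponents {
      public static void main(String[] args) throws Exception {
        Document doc = DocumentBuilderFactory.newInstance()
            .newDocumentBuilder()
            .parse("metainfo.xml"); // hypothetical local path
        NodeList components = doc.getElementsByTagName("component");
        for (int i = 0; i < components.getLength(); i++) {
          Element c = (Element) components.item(i);
          // The component's own <name> precedes any nested customCommand names.
          String name = c.getElementsByTagName("name").item(0).getTextContent();
          String cat = c.getElementsByTagName("category").item(0).getTextContent();
          System.out.println(cat + ": " + name);
        }
      }
    }

Run against the file above, this would print NAMENODE and SECONDARY_NAMENODE
as MASTER, the three DATANODE* components as SLAVE, and HDFS_CLIENT as CLIENT.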

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git 
a/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hbase-site.xml
 
b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hbase-site.xml
new file mode 100644
index 0000000..5024e85
--- /dev/null
+++ 
b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hbase-site.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds. The default is 15 seconds. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as the value to wait
+    before retrying a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log 
roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC server instances spun up on RegionServers.
+    The same property is used by the HMaster for the count of master handlers.
+    Default is 10.
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the HBase master web UI.
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the HBase RegionServer web UI.
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 
seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always 
off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>
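
One detail worth calling out in the hbase-site.xml above: with
hbase.hregion.max.filesize set to 67108864 (64 MB), the description's split
rule "value + (value / 2)" puts the actual split trigger at roughly 96 MB,
deliberately small so that tests split regions often. A trivial sketch of
that arithmetic:

    public class RegionSplitThreshold {
      public static void main(String[] args) {
        long maxFileSize = 67108864L; // hbase.hregion.max.filesize, 64 MB
        // Per the description, a region is split once its size
        // exceeds value + (value / 2).
        long splitAt = maxFileSize + maxFileSize / 2;
        System.out.println(splitAt + " bytes"); // 100663296 (~96 MB)
      }
    }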

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git 
a/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hdfs-site.xml
 
b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hdfs-site.xml
new file mode 100644
index 0000000..995f6c1
--- /dev/null
+++ 
b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hdfs-site.xml
@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>Whether to enable HDFS append support.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>Whether to enable WebHDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>Number of failed disks a DataNode will tolerate.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>The user who is allowed to perform short-circuit
+    reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem a DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in 
seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for balancing purposes, in terms of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+    <description>Address where the datanode binds.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+    <description>HTTP address for the datanode.</description>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+    <description>The address and port of the NameNode web UI.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much
+    space free for non-DFS use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+    The datanode ipc server address and port.
+    If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.xcievers</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>dfs.umaskmode</name>
+    <value>077</value>
+    <description>
+    The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.ugi</name>
+    <!-- cluster variant -->
+    <value>gopher,gopher</value>
+    <description>The user account used by the web interface.
+    Syntax: USERNAME,GROUP1,GROUP2, ...
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions</name>
+    <value>true</value>
+    <description>
+    If "true", enable permission checking in HDFS.
+    If "false", permission checking is turned off,
+    but all other behavior is unchanged.
+    Switching from one parameter value to the other does not change the mode,
+    owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.supergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>The number of server threads for the namenode. Increased to
+    grow the queue size so that more client connections are allowed.
+    </description>
+  </property>
+
+  <property>
+    <name>ipc.server.max.response.size</name>
+    <value>5242880</value>
+    <description>The maximum response size, in bytes, for the IPC
+    server.</description>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+    If "true", access tokens are used as capabilities for accessing datanodes.
+    If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+    Kerberos principal name for the NameNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+    Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+  <!--
+    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+  -->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the NameNode runs
+    on.</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary
+    NameNode runs on.</description>
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+    <description>
+    The Kerberos principal that the DataNode runs as. "_HOST" is replaced
+    by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+    Combined keytab file containing the namenode service and host
+    principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+    Combined keytab file containing the namenode service and host
+    principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+    <description>
+    The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>The https port to which the namenode binds.</description>
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+    <description>The https address to which the namenode binds.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <description>The permissions that should be set on dfs.data.dir
+    directories. The datanode will not come up if the permissions are
+    different on existing dfs.data.dir directories. If the directories
+    don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+    <name>dfs.access.time.precision</name>
+    <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+    The default value is 1 hour. Setting a value of 0 disables
+    access times for HDFS.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL of users and groups that may view the default servlets in
+    HDFS.</description>
+  </property>
+
+  <property>
+    <name>ipc.server.read.threadpool.size</name>
+    <value>5</value>
+    <description>The number of reader threads that deserialize incoming
+    requests for the IPC server.</description>
+  </property>
+
+</configuration>
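
The Kerberos principals above use the _HOST placeholder (e.g. nn/_HOST@),
which Hadoop expands to the service's host name at startup. A minimal sketch
using Hadoop's SecurityUtil, with an illustrative realm and host name (the
test config leaves the realm empty):

    import java.io.IOException;
    import org.apache.hadoop.security.SecurityUtil;

    public class ExpandPrincipal {
      public static void main(String[] args) throws IOException {
        // _HOST is replaced with the supplied (or local) host name.
        String p = SecurityUtil.getServerPrincipal(
            "nn/_HOST@EXAMPLE.COM", "nn1.example.com");
        System.out.println(p); // nn/nn1.example.com@EXAMPLE.COM
      }
    }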

http://git-wip-us.apache.org/repos/asf/ambari/blob/ffb4d3b8/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git 
a/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
 
b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
new file mode 100644
index 0000000..db3a0ac
--- /dev/null
+++ 
b/ambari-funtest/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
@@ -0,0 +1,400 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>Whether to enable HDFS append support.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>Whether to enable WebHDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>Number of failed disks a DataNode will tolerate.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>The user who is allowed to perform short-circuit
+    reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem a DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in 
seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for balancing purposes, in terms of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+    <description>Address where the datanode binds.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+    <description>HTTP address for the datanode.</description>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+    <description>The address and port of the NameNode web UI.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much
+    space free for non-DFS use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+    The datanode ipc server address and port.
+    If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.xcievers</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>dfs.umaskmode</name>
+    <value>077</value>
+    <description>
+    The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.ugi</name>
+    <!-- cluster variant -->
+    <value>gopher,gopher</value>
+    <description>The user account used by the web interface.
+    Syntax: USERNAME,GROUP1,GROUP2, ...
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions</name>
+    <value>true</value>
+    <description>
+    If "true", enable permission checking in HDFS.
+    If "false", permission checking is turned off,
+    but all other behavior is unchanged.
+    Switching from one parameter value to the other does not change the mode,
+    owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.supergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>The number of server threads for the namenode. Increased to
+    grow the queue size so that more client connections are allowed.
+    </description>
+  </property>
+
+  <property>
+    <name>ipc.server.max.response.size</name>
+    <value>5242880</value>
+    <description>The maximum response size, in bytes, for the IPC
+    server.</description>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+    If "true", access tokens are used as capabilities for accessing datanodes.
+    If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+    Kerberos principal name for the NameNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value>nn/_HOST@</value>
+    <description>
+    Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+  <!--
+    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+  -->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the NameNode runs
+    on.</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary
+    NameNode runs on.</description>
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+    <description>
+    The Kerberos principal that the DataNode runs as. "_HOST" is replaced
+    by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+    Combined keytab file containing the namenode service and host
+    principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+    <description>
+    Combined keytab file containing the namenode service and host
+    principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+    <description>
+    The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>The https port to which the namenode binds.</description>
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+    <description>The https address to which the namenode binds.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <description>The permissions that should be set on dfs.data.dir
+    directories. The datanode will not come up if the permissions are
+    different on existing dfs.data.dir directories. If the directories
+    don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+    <name>dfs.access.time.precision</name>
+    <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+    The default value is 1 hour. Setting a value of 0 disables
+    access times for HDFS.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL of users and groups that may view the default servlets in
+    HDFS.</description>
+  </property>
+
+  <property>
+    <name>ipc.server.read.threadpool.size</name>
+    <value>5</value>
+    <description>The number of reader threads that deserialize incoming
+    requests for the IPC server.</description>
+  </property>
+
+</configuration>
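
Several properties in these files are marked <final>true</final>. In Hadoop's
Configuration, a final property loaded from an earlier resource cannot be
overridden by a later one, which is how these test stacks pin values such as
dfs.data.dir. A small sketch, with hypothetical resource names:

    import org.apache.hadoop.conf.Configuration;

    public class FinalPropertyDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // Resources are applied in the order they are added.
        conf.addResource("hdfs-site.xml");     // declares dfs.data.dir final
        conf.addResource("hdfs-override.xml"); // its dfs.data.dir is ignored
        // Still prints the value from the first, final-declaring resource.
        System.out.println(conf.get("dfs.data.dir"));
      }
    }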
