http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java
deleted file mode 100644
index 8d0dc6a..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.registry.secure;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.registry.client.api.RegistryConstants;
-import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
-import org.apache.zookeeper.ZooDefs;
-import org.apache.zookeeper.data.ACL;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-
-import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
-
-/**
- * Test for registry security operations
- */
-public class TestRegistrySecurityHelper extends Assert {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestRegistrySecurityHelper.class);
-
-  public static final String YARN_EXAMPLE_COM = "[email protected]";
-  public static final String SASL_YARN_EXAMPLE_COM =
-      "sasl:" + YARN_EXAMPLE_COM;
-  public static final String MAPRED_EXAMPLE_COM = "[email protected]";
-  public static final String SASL_MAPRED_EXAMPLE_COM =
-      "sasl:" + MAPRED_EXAMPLE_COM;
-  public static final String SASL_MAPRED_APACHE = "sasl:mapred@APACHE";
-  public static final String DIGEST_F0AF = "digest:f0afbeeb00baa";
-  public static final String SASL_YARN_SHORT = "sasl:yarn@";
-  public static final String SASL_MAPRED_SHORT = "sasl:mapred@";
-  public static final String REALM_EXAMPLE_COM = "example.com";
-  private static RegistrySecurity registrySecurity;
-
-  @BeforeClass
-  public static void setupTestRegistrySecurityHelper() throws IOException {
-    Configuration conf = new Configuration();
-    conf.setBoolean(KEY_REGISTRY_SECURE, true);
-    conf.set(KEY_REGISTRY_KERBEROS_REALM, "KERBEROS");
-    registrySecurity = new RegistrySecurity("");
-    // init the ACLs OUTSIDE A KERBEROS CLUSTER
-    registrySecurity.init(conf);
-  }
-
-  @Test
-  public void testACLSplitRealmed() throws Throwable {
-    List<String> pairs =
-        registrySecurity.splitAclPairs(
-            SASL_YARN_EXAMPLE_COM +
-            ", " +
-            SASL_MAPRED_EXAMPLE_COM,
-            "");
-
-    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
-    assertEquals(SASL_MAPRED_EXAMPLE_COM, pairs.get(1));
-  }
-
-
-  @Test
-  public void testBuildAclsRealmed() throws Throwable {
-    List<ACL> acls = registrySecurity.buildACLs(
-        SASL_YARN_EXAMPLE_COM +
-        ", " +
-        SASL_MAPRED_EXAMPLE_COM,
-        "",
-        ZooDefs.Perms.ALL);
-    assertEquals(YARN_EXAMPLE_COM, acls.get(0).getId().getId());
-    assertEquals(MAPRED_EXAMPLE_COM, acls.get(1).getId().getId());
-  }
-
-  @Test
-  public void testACLDefaultRealm() throws Throwable {
-    List<String> pairs =
-        registrySecurity.splitAclPairs(
-            SASL_YARN_SHORT +
-            ", " +
-            SASL_MAPRED_SHORT,
-            REALM_EXAMPLE_COM);
-
-    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
-    assertEquals(SASL_MAPRED_EXAMPLE_COM, pairs.get(1));
-  }
-
-  @Test
-  public void testBuildAclsDefaultRealm() throws Throwable {
-    List<ACL> acls = registrySecurity.buildACLs(
-        SASL_YARN_SHORT +
-        ", " +
-        SASL_MAPRED_SHORT,
-        REALM_EXAMPLE_COM, ZooDefs.Perms.ALL);
-
-    assertEquals(YARN_EXAMPLE_COM, acls.get(0).getId().getId());
-    assertEquals(MAPRED_EXAMPLE_COM, acls.get(1).getId().getId());
-  }
-
-  @Test
-  public void testACLSplitNullRealm() throws Throwable {
-    List<String> pairs =
-        registrySecurity.splitAclPairs(
-            SASL_YARN_SHORT +
-            ", " +
-            SASL_MAPRED_SHORT,
-            "");
-
-    assertEquals(SASL_YARN_SHORT, pairs.get(0));
-    assertEquals(SASL_MAPRED_SHORT, pairs.get(1));
-  }
-
-  @Test(expected = IllegalArgumentException.class)
-  public void testBuildAclsNullRealm() throws Throwable {
-    registrySecurity.buildACLs(
-        SASL_YARN_SHORT +
-        ", " +
-        SASL_MAPRED_SHORT,
-        "", ZooDefs.Perms.ALL);
-    fail("");
-
-  }
-
-  @Test
-  public void testACLDefaultRealmOnlySASL() throws Throwable {
-    List<String> pairs =
-        registrySecurity.splitAclPairs(
-            SASL_YARN_SHORT +
-            ", " +
-            DIGEST_F0AF,
-            REALM_EXAMPLE_COM);
-
-    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
-    assertEquals(DIGEST_F0AF, pairs.get(1));
-  }
-
-  @Test
-  public void testACLSplitMixed() throws Throwable {
-    List<String> pairs =
-        registrySecurity.splitAclPairs(
-            SASL_YARN_SHORT +
-            ", " +
-            SASL_MAPRED_APACHE +
-            ", ,," +
-            DIGEST_F0AF,
-            REALM_EXAMPLE_COM);
-
-    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
-    assertEquals(SASL_MAPRED_APACHE, pairs.get(1));
-    assertEquals(DIGEST_F0AF, pairs.get(2));
-  }
-
-  @Test
-  public void testDefaultAClsValid() throws Throwable {
-    registrySecurity.buildACLs(
-        RegistryConstants.DEFAULT_REGISTRY_SYSTEM_ACCOUNTS,
-        REALM_EXAMPLE_COM, ZooDefs.Perms.ALL);
-  }
-
-  @Test
-  public void testDefaultRealm() throws Throwable {
-    String realm = RegistrySecurity.getDefaultRealmInJVM();
-    LOG.info("Realm {}", realm);
-  }
-
-  @Test
-  public void testUGIProperties() throws Throwable {
-    UserGroupInformation user = UserGroupInformation.getCurrentUser();
-    ACL acl = registrySecurity.createACLForUser(user, ZooDefs.Perms.ALL);
-    assertFalse(RegistrySecurity.ALL_READWRITE_ACCESS.equals(acl));
-    LOG.info("User {} has ACL {}", user, acl);
-  }
-
-
-  @Test
-  public void testSecurityImpliesKerberos() throws Throwable {
-    Configuration conf = new Configuration();
-    conf.setBoolean("hadoop.security.authentication", true);
-    conf.setBoolean(KEY_REGISTRY_SECURE, true);
-    conf.set(KEY_REGISTRY_KERBEROS_REALM, "KERBEROS");
-    RegistrySecurity security = new RegistrySecurity("registry security");
-    try {
-      security.init(conf);
-    } catch (Exception e) {
-      assertTrue(
-          "did not find "+ RegistrySecurity.E_NO_KERBEROS + " in " + e,
-          e.toString().contains(RegistrySecurity.E_NO_KERBEROS));
-    }
-  }
-
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
deleted file mode 100644
index d66bb79..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.registry.secure;
-
-import java.io.File;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Method;
-import java.security.Principal;
-import java.security.PrivilegedExceptionAction;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import javax.security.auth.Subject;
-import javax.security.auth.callback.CallbackHandler;
-import javax.security.auth.kerberos.KerberosPrincipal;
-import javax.security.auth.login.LoginContext;
-import javax.security.auth.login.LoginException;
-
-import org.apache.zookeeper.Environment;
-import org.apache.zookeeper.data.ACL;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.security.HadoopKerberosName;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.hadoop.security.authentication.util.KerberosUtil;
-import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
-import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions;
-import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
-
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Verify that logins work
- */
-public class TestSecureLogins extends AbstractSecureRegistryTest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestSecureLogins.class);
-
-  @Test
-  public void testHasRealm() throws Throwable {
-    assertNotNull(getRealm());
-    LOG.info("ZK principal = {}", getPrincipalAndRealm(ZOOKEEPER_LOCALHOST));
-  }
-
-  @Test
-  public void testJaasFileSetup() throws Throwable {
-    // the JVM has seemed inconsistent on setting up here
-    assertNotNull("jaasFile", jaasFile);
-    String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
-    assertEquals(jaasFile.getAbsolutePath(), confFilename);
-  }
-
-  @Test
-  public void testJaasFileBinding() throws Throwable {
-    // the JVM has seemed inconsistent on setting up here
-    assertNotNull("jaasFile", jaasFile);
-    RegistrySecurity.bindJVMtoJAASFile(jaasFile);
-    String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
-    assertEquals(jaasFile.getAbsolutePath(), confFilename);
-  }
-
-  @Test
-  public void testClientLogin() throws Throwable {
-    LoginContext client = login(ALICE_LOCALHOST,
-                                ALICE_CLIENT_CONTEXT,
-                                keytab_alice);
-
-    try {
-      logLoginDetails(ALICE_LOCALHOST, client);
-      String confFilename = System.getProperty(Environment.JAAS_CONF_KEY);
-      assertNotNull("Unset: "+ Environment.JAAS_CONF_KEY, confFilename);
-      String config = FileUtils.readFileToString(new File(confFilename));
-      LOG.info("{}=\n{}", confFilename, config);
-      RegistrySecurity.setZKSaslClientProperties(ALICE, ALICE_CLIENT_CONTEXT);
-    } finally {
-      client.logout();
-    }
-  }
-
-  @Test
-  public void testZKServerContextLogin() throws Throwable {
-    LoginContext client = login(ZOOKEEPER_LOCALHOST,
-                                ZOOKEEPER_SERVER_CONTEXT,
-                                keytab_zk);
-    logLoginDetails(ZOOKEEPER_LOCALHOST, client);
-
-    client.logout();
-  }
-
-  @Test
-  public void testServerLogin() throws Throwable {
-    LoginContext loginContext = createLoginContextZookeeperLocalhost();
-    loginContext.login();
-    loginContext.logout();
-  }
-
-  public LoginContext createLoginContextZookeeperLocalhost() throws
-      LoginException {
-    String principalAndRealm = getPrincipalAndRealm(ZOOKEEPER_LOCALHOST);
-    Set<Principal> principals = new HashSet<Principal>();
-    principals.add(new KerberosPrincipal(ZOOKEEPER_LOCALHOST));
-    Subject subject = new Subject(false, principals, new HashSet<Object>(),
-        new HashSet<Object>());
-    return new LoginContext("", subject, null,
-        KerberosConfiguration.createServerConfig(ZOOKEEPER_LOCALHOST, 
keytab_zk));
-  }
-
-  @Test
-  public void testKerberosAuth() throws Throwable {
-    File krb5conf = getKdc().getKrb5conf();
-    String krbConfig = FileUtils.readFileToString(krb5conf);
-    LOG.info("krb5.conf at {}:\n{}", krb5conf, krbConfig);
-    Subject subject = new Subject();
-    Class<?> kerb5LoginClass =
-        Class.forName(KerberosUtil.getKrb5LoginModuleName());
-    Constructor<?> kerb5LoginConstr = kerb5LoginClass.getConstructor();
-    Object kerb5LoginObject = kerb5LoginConstr.newInstance();
-    final Map<String, String> options = new HashMap<String, String>();
-    options.put("debug", "true");
-    if (IBM_JAVA) {
-      options.put("useKeytab",
-          keytab_alice.getAbsolutePath().startsWith("file://")
-            ? keytab_alice.getAbsolutePath()
-            : "file://" +  keytab_alice.getAbsolutePath());
-      options.put("principal", ALICE_LOCALHOST);
-      options.put("refreshKrb5Config", "true");
-      options.put("credsType", "both");
-      String ticketCache = System.getenv("KRB5CCNAME");
-      if (ticketCache != null) {
-        // IBM JAVA only respect system property and not env variable
-        // The first value searched when "useDefaultCcache" is used.
-        System.setProperty("KRB5CCNAME", ticketCache);
-        options.put("useDefaultCcache", "true");
-        options.put("renewTGT", "true");
-      }
-    } else {
-      options.put("keyTab", keytab_alice.getAbsolutePath());
-      options.put("principal", ALICE_LOCALHOST);
-      options.put("doNotPrompt", "true");
-      options.put("isInitiator", "true");
-      options.put("refreshKrb5Config", "true");
-      options.put("renewTGT", "true");
-      options.put("storeKey", "true");
-      options.put("useKeyTab", "true");
-      options.put("useTicketCache", "true");
-    }
-    Method methodInitialize =
-        kerb5LoginObject.getClass().getMethod("initialize", Subject.class,
-          CallbackHandler.class, Map.class, Map.class);
-    methodInitialize.invoke(kerb5LoginObject, subject, null,
-        new HashMap<String, String>(), options);
-    Method methodLogin = kerb5LoginObject.getClass().getMethod("login");
-    boolean loginOk = (Boolean) methodLogin.invoke(kerb5LoginObject);
-    assertTrue("Failed to login", loginOk);
-    Method methodCommit = kerb5LoginObject.getClass().getMethod("commit");
-    boolean commitOk = (Boolean) methodCommit.invoke(kerb5LoginObject);
-    assertTrue("Failed to Commit", commitOk);
-  }
-
-  @Test
-  public void testDefaultRealmValid() throws Throwable {
-    String defaultRealm = KerberosUtil.getDefaultRealm();
-    assertNotEmpty("No default Kerberos Realm",
-        defaultRealm);
-    LOG.info("Default Realm '{}'", defaultRealm);
-  }
-
-  @Test
-  public void testKerberosRulesValid() throws Throwable {
-    assertTrue("!KerberosName.hasRulesBeenSet()",
-        KerberosName.hasRulesBeenSet());
-    String rules = KerberosName.getRules();
-    assertEquals(kerberosRule, rules);
-    LOG.info(rules);
-  }
-
-  @Test
-  public void testValidKerberosName() throws Throwable {
-
-    new HadoopKerberosName(ZOOKEEPER).getShortName();
-    new HadoopKerberosName(ZOOKEEPER_LOCALHOST).getShortName();
-    new HadoopKerberosName(ZOOKEEPER_REALM).getShortName();
-    // standard rules don't pick this up
-    // new HadoopKerberosName(ZOOKEEPER_LOCALHOST_REALM).getShortName();
-  }
-
-  @Test
-  public void testUGILogin() throws Throwable {
-
-    UserGroupInformation ugi = loginUGI(ZOOKEEPER, keytab_zk);
-    RegistrySecurity.UgiInfo ugiInfo =
-        new RegistrySecurity.UgiInfo(ugi);
-    LOG.info("logged in as: {}", ugiInfo);
-    assertTrue("security is not enabled: " + ugiInfo,
-        UserGroupInformation.isSecurityEnabled());
-    assertTrue("login is keytab based: " + ugiInfo,
-        ugi.isFromKeytab());
-
-    // now we are here, build a SASL ACL
-    ACL acl = ugi.doAs(new PrivilegedExceptionAction<ACL>() {
-      @Override
-      public ACL run() throws Exception {
-        return registrySecurity.createSaslACLFromCurrentUser(0);
-      }
-    });
-    assertEquals(ZOOKEEPER_REALM, acl.getId().getId());
-    assertEquals(ZookeeperConfigOptions.SCHEME_SASL, acl.getId().getScheme());
-    registrySecurity.addSystemACL(acl);
-
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
deleted file mode 100644
index 9d5848e..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.registry.secure;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.service.ServiceOperations;
-import org.apache.hadoop.registry.client.impl.zk.ZKPathDumper;
-import org.apache.hadoop.registry.client.impl.zk.CuratorService;
-import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.Login;
-import org.apache.zookeeper.server.ZooKeeperSaslServer;
-import org.apache.zookeeper.server.auth.SaslServerCallbackHandler;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.security.auth.login.AppConfigurationEntry;
-import javax.security.auth.login.LoginContext;
-
-import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
-import static 
org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions.PROP_ZK_SASL_CLIENT_CONTEXT;
-import static 
org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions.PROP_ZK_SASL_CLIENT_USERNAME;
-
-/**
- * Verify that the Mini ZK service can be started up securely
- */
-public class TestSecureRegistry extends AbstractSecureRegistryTest {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestSecureRegistry.class);
-
-  @Before
-  public void beforeTestSecureZKService() throws Throwable {
-      enableKerberosDebugging();
-  }
-
-  @After
-  public void afterTestSecureZKService() throws Throwable {
-    disableKerberosDebugging();
-    RegistrySecurity.clearZKSaslClientProperties();
-  }
-
-  /**
-  * this is a cut and paste of some of the ZK internal code that was
-   * failing on windows and swallowing its exceptions
-   */
-  @Test
-  public void testLowlevelZKSaslLogin() throws Throwable {
-    RegistrySecurity.bindZKToServerJAASContext(ZOOKEEPER_SERVER_CONTEXT);
-    String serverSection =
-        System.getProperty(ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY,
-            ZooKeeperSaslServer.DEFAULT_LOGIN_CONTEXT_NAME);
-    assertEquals(ZOOKEEPER_SERVER_CONTEXT, serverSection);
-
-    AppConfigurationEntry entries[];
-    entries = javax.security.auth.login.Configuration.getConfiguration()
-                                                     .getAppConfigurationEntry(
-                                                         serverSection);
-
-    assertNotNull("null entries", entries);
-
-    SaslServerCallbackHandler saslServerCallbackHandler =
-        new SaslServerCallbackHandler(
-            javax.security.auth.login.Configuration.getConfiguration());
-    Login login = new Login(serverSection, saslServerCallbackHandler);
-    try {
-      login.startThreadIfNeeded();
-    } finally {
-      login.shutdown();
-    }
-  }
-
-  @Test
-  public void testCreateSecureZK() throws Throwable {
-    startSecureZK();
-    secureZK.stop();
-  }
-
-  @Test
-  public void testInsecureClientToZK() throws Throwable {
-    startSecureZK();
-    userZookeeperToCreateRoot();
-    RegistrySecurity.clearZKSaslClientProperties();
-
-    CuratorService curatorService =
-        startCuratorServiceInstance("insecure client", false);
-
-    curatorService.zkList("/");
-    curatorService.zkMkPath("", CreateMode.PERSISTENT, false,
-        RegistrySecurity.WorldReadWriteACL);
-  }
-
-  /**
-   * test that ZK can write as itself
-   * @throws Throwable
-   */
-  @Test
-  public void testZookeeperCanWrite() throws Throwable {
-
-    System.setProperty("curator-log-events", "true");
-    startSecureZK();
-    CuratorService curator = null;
-    LoginContext login = login(ZOOKEEPER_LOCALHOST,
-        ZOOKEEPER_CLIENT_CONTEXT,
-        keytab_zk);
-    try {
-      logLoginDetails(ZOOKEEPER, login);
-      RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
-                                                ZOOKEEPER_CLIENT_CONTEXT);
-      curator = startCuratorServiceInstance("ZK", true);
-      LOG.info(curator.toString());
-
-      addToTeardown(curator);
-      curator.zkMkPath("/", CreateMode.PERSISTENT, false,
-          RegistrySecurity.WorldReadWriteACL);
-      curator.zkList("/");
-      curator.zkMkPath("/zookeeper", CreateMode.PERSISTENT, false,
-          RegistrySecurity.WorldReadWriteACL);
-    } finally {
-      logout(login);
-      ServiceOperations.stop(curator);
-    }
-  }
-
-  @Test
-  public void testSystemPropertyOverwrite() {
-    System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, "");
-    System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, "");
-    RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
-        ZOOKEEPER_CLIENT_CONTEXT);
-    assertEquals(ZOOKEEPER, System.getProperty(PROP_ZK_SASL_CLIENT_USERNAME));
-    assertEquals(ZOOKEEPER_CLIENT_CONTEXT,
-        System.getProperty(PROP_ZK_SASL_CLIENT_CONTEXT));
-
-    String userName = "user1";
-    String context = "context1";
-    System.setProperty(PROP_ZK_SASL_CLIENT_USERNAME, userName);
-    System.setProperty(PROP_ZK_SASL_CLIENT_CONTEXT, context);
-    RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
-        ZOOKEEPER_CLIENT_CONTEXT);
-    assertEquals(userName, System.getProperty(PROP_ZK_SASL_CLIENT_USERNAME));
-    assertEquals(context, System.getProperty(PROP_ZK_SASL_CLIENT_CONTEXT));
-  }
-
-  /**
-   * Start a curator service instance
-   * @param name name
-   * @param secure flag to indicate the cluster is secure
-   * @return an inited and started curator service
-   */
-  protected CuratorService startCuratorServiceInstance(String name,
-      boolean secure) {
-    Configuration clientConf = new Configuration();
-    clientConf.set(KEY_REGISTRY_ZK_ROOT, "/");
-    clientConf.setBoolean(KEY_REGISTRY_SECURE, secure);
-    describe(LOG, "Starting Curator service");
-    CuratorService curatorService = new CuratorService(name, secureZK);
-    curatorService.init(clientConf);
-    curatorService.start();
-    LOG.info("Curator Binding {}",
-        curatorService.bindingDiagnosticDetails());
-    return curatorService;
-  }
-
-  /**
-   * have the ZK user create the root dir.
-   * This logs out the ZK user after and stops its curator instance,
-   * to avoid contamination
-   * @throws Throwable
-   */
-  public void userZookeeperToCreateRoot() throws Throwable {
-
-    System.setProperty("curator-log-events", "true");
-    CuratorService curator = null;
-    LoginContext login = login(ZOOKEEPER_LOCALHOST,
-        ZOOKEEPER_CLIENT_CONTEXT,
-        keytab_zk);
-    try {
-      logLoginDetails(ZOOKEEPER, login);
-      RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
-          ZOOKEEPER_CLIENT_CONTEXT);
-      curator = startCuratorServiceInstance("ZK", true);
-      LOG.info(curator.toString());
-
-      addToTeardown(curator);
-      curator.zkMkPath("/", CreateMode.PERSISTENT, false,
-          RegistrySecurity.WorldReadWriteACL);
-      ZKPathDumper pathDumper = curator.dumpPath(true);
-      LOG.info(pathDumper.toString());
-    } finally {
-      logout(login);
-      ServiceOperations.stop(curator);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
deleted file mode 100644
index a0c4ca3..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
+++ /dev/null
@@ -1,725 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.registry.server.dns;
-
-import org.apache.commons.net.util.Base64;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.registry.client.api.RegistryConstants;
-import org.apache.hadoop.registry.client.binding.RegistryUtils;
-import org.apache.hadoop.registry.client.types.ServiceRecord;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.xbill.DNS.AAAARecord;
-import org.xbill.DNS.ARecord;
-import org.xbill.DNS.CNAMERecord;
-import org.xbill.DNS.DClass;
-import org.xbill.DNS.DNSKEYRecord;
-import org.xbill.DNS.DNSSEC;
-import org.xbill.DNS.Flags;
-import org.xbill.DNS.Message;
-import org.xbill.DNS.Name;
-import org.xbill.DNS.OPTRecord;
-import org.xbill.DNS.PTRRecord;
-import org.xbill.DNS.RRSIGRecord;
-import org.xbill.DNS.RRset;
-import org.xbill.DNS.Rcode;
-import org.xbill.DNS.Record;
-import org.xbill.DNS.SRVRecord;
-import org.xbill.DNS.Section;
-import org.xbill.DNS.Type;
-
-import java.io.IOException;
-import java.math.BigInteger;
-import java.net.Inet6Address;
-import java.net.InetAddress;
-import java.security.KeyFactory;
-import java.security.PrivateKey;
-import java.security.spec.RSAPrivateKeySpec;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
-
-/**
- *
- */
-public class TestRegistryDNS extends Assert {
-
-  private RegistryDNS registryDNS;
-  private RegistryUtils.ServiceRecordMarshal marshal;
-
-  private static final String APPLICATION_RECORD = "{\n"
-      + "  \"type\" : \"JSONServiceRecord\",\n"
-      + "  \"description\" : \"Slider Application Master\",\n"
-      + "  \"external\" : [ {\n"
-      + "    \"api\" : \"classpath:org.apache.hadoop.yarn.service.appmaster.ipc"
-      + "\",\n"
-      + "    \"addressType\" : \"host/port\",\n"
-      + "    \"protocolType\" : \"hadoop/IPC\",\n"
-      + "    \"addresses\" : [ {\n"
-      + "      \"host\" : \"192.168.1.5\",\n"
-      + "      \"port\" : \"1026\"\n"
-      + "    } ]\n"
-      + "  }, {\n"
-      + "    \"api\" : \"http://\",\n"
-      + "    \"addressType\" : \"uri\",\n"
-      + "    \"protocolType\" : \"webui\",\n"
-      + "    \"addresses\" : [ {\n"
-      + "      \"uri\" : \"http://192.168.1.5:1027\"\n"
-      + "    } ]\n"
-      + "  }, {\n"
-      + "    \"api\" : \"classpath:org.apache.hadoop.yarn.service.management\""
-      + ",\n"
-      + "    \"addressType\" : \"uri\",\n"
-      + "    \"protocolType\" : \"REST\",\n"
-      + "    \"addresses\" : [ {\n"
-      + "      \"uri\" : \"http://192.168.1.5:1027/ws/v1/slider/mgmt\"\n"
-      + "    } ]\n"
-      + "  } ],\n"
-      + "  \"internal\" : [ {\n"
-      + "    \"api\" : \"classpath:org.apache.hadoop.yarn.service.agents.secure"
-      + "\",\n"
-      + "    \"addressType\" : \"uri\",\n"
-      + "    \"protocolType\" : \"REST\",\n"
-      + "    \"addresses\" : [ {\n"
-      + "      \"uri\" : \"https://192.168.1.5:47700/ws/v1/slider/agents\"\n"
-      + "    } ]\n"
-      + "  }, {\n"
-      + "    \"api\" : \"classpath:org.apache.hadoop.yarn.service.agents.oneway"
-      + "\",\n"
-      + "    \"addressType\" : \"uri\",\n"
-      + "    \"protocolType\" : \"REST\",\n"
-      + "    \"addresses\" : [ {\n"
-      + "      \"uri\" : \"https://192.168.1.5:35531/ws/v1/slider/agents\"\n"
-      + "    } ]\n"
-      + "  } ],\n"
-      + "  \"yarn:id\" : \"application_1451931954322_0016\",\n"
-      + "  \"yarn:persistence\" : \"application\"\n"
-      + "}\n";
-  static final String CONTAINER_RECORD = "{\n"
-      + "  \"type\" : \"JSONServiceRecord\",\n"
-      + "  \"description\" : \"httpd-1\",\n"
-      + "  \"external\" : [ ],\n"
-      + "  \"internal\" : [ ],\n"
-      + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000002\",\n"
-      + "  \"yarn:persistence\" : \"container\",\n"
-      + "  \"yarn:ip\" : \"172.17.0.19\",\n"
-      + "  \"yarn:hostname\" : \"host1\",\n"
-      + "  \"yarn:component\" : \"httpd\"\n"
-      + "}\n";
-
-  static final String CONTAINER_RECORD2 = "{\n"
-      + "  \"type\" : \"JSONServiceRecord\",\n"
-      + "  \"description\" : \"httpd-2\",\n"
-      + "  \"external\" : [ ],\n"
-      + "  \"internal\" : [ ],\n"
-      + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000003\",\n"
-      + "  \"yarn:persistence\" : \"container\",\n"
-      + "  \"yarn:ip\" : \"172.17.0.20\",\n"
-      + "  \"yarn:hostname\" : \"host2\",\n"
-      + "  \"yarn:component\" : \"httpd\"\n"
-      + "}\n";
-
-  private static final String CONTAINER_RECORD_NO_IP = "{\n"
-      + "  \"type\" : \"JSONServiceRecord\",\n"
-      + "  \"description\" : \"httpd-1\",\n"
-      + "  \"external\" : [ ],\n"
-      + "  \"internal\" : [ ],\n"
-      + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000002\",\n"
-      + "  \"yarn:persistence\" : \"container\",\n"
-      + "  \"yarn:component\" : \"httpd\"\n"
-      + "}\n";
-
-  private static final String CONTAINER_RECORD_YARN_PERSISTANCE_ABSENT = "{\n"
-      + "  \"type\" : \"JSONServiceRecord\",\n"
-      + "  \"description\" : \"httpd-1\",\n"
-      + "  \"external\" : [ ],\n"
-      + "  \"internal\" : [ ],\n"
-      + "  \"yarn:id\" : \"container_e50_1451931954322_0016_01_000003\",\n"
-      + "  \"yarn:ip\" : \"172.17.0.19\",\n"
-      + "  \"yarn:hostname\" : \"0a134d6329bb\",\n"
-      + "  \"yarn:component\" : \"httpd\""
-      + "}\n";
-
-  @Before
-  public void initialize() throws Exception {
-    setRegistryDNS(new RegistryDNS("TestRegistry"));
-    Configuration conf = createConfiguration();
-
-    getRegistryDNS().setDomainName(conf);
-    getRegistryDNS().initializeZones(conf);
-
-    setMarshal(new RegistryUtils.ServiceRecordMarshal());
-  }
-
-  protected Configuration createConfiguration() {
-    Configuration conf = new Configuration();
-    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "dev.test");
-    conf.set(RegistryConstants.KEY_DNS_ZONE_SUBNET, "172.17.0");
-    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
-    return conf;
-  }
-
-  protected boolean isSecure() {
-    return false;
-  }
-
-  @After
-  public void closeRegistry() throws Exception {
-    getRegistryDNS().stopExecutor();
-  }
-
-  @Test
-  public void testAppRegistration() throws Exception {
-    ServiceRecord record = getMarshal().fromBytes("somepath",
-        APPLICATION_RECORD.getBytes());
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/", record);
-
-    // start assessing whether correct records are available
-    Record[] recs = assertDNSQuery("test1.root.dev.test.");
-    assertEquals("wrong result", "192.168.1.5",
-        ((ARecord) recs[0]).getAddress().getHostAddress());
-
-    recs = assertDNSQuery("management-api.test1.root.dev.test.", 2);
-    assertEquals("wrong target name", "test1.root.dev.test.",
-        ((CNAMERecord) recs[0]).getTarget().toString());
-    assertTrue("not an ARecord", recs[isSecure() ? 2 : 1] instanceof ARecord);
-
-    recs = assertDNSQuery("appmaster-ipc-api.test1.root.dev.test.",
-        Type.SRV, 1);
-    assertTrue("not an SRV record", recs[0] instanceof SRVRecord);
-    assertEquals("wrong port", 1026, ((SRVRecord) recs[0]).getPort());
-
-    recs = assertDNSQuery("appmaster-ipc-api.test1.root.dev.test.", 2);
-    assertEquals("wrong target name", "test1.root.dev.test.",
-        ((CNAMERecord) recs[0]).getTarget().toString());
-    assertTrue("not an ARecord", recs[isSecure() ? 2 : 1] instanceof ARecord);
-
-    recs = assertDNSQuery("http-api.test1.root.dev.test.", 2);
-    assertEquals("wrong target name", "test1.root.dev.test.",
-        ((CNAMERecord) recs[0]).getTarget().toString());
-    assertTrue("not an ARecord", recs[isSecure() ? 2 : 1] instanceof ARecord);
-
-    recs = assertDNSQuery("http-api.test1.root.dev.test.", Type.SRV,
-        1);
-    assertTrue("not an SRV record", recs[0] instanceof SRVRecord);
-    assertEquals("wrong port", 1027, ((SRVRecord) recs[0]).getPort());
-
-    assertDNSQuery("test1.root.dev.test.", Type.TXT, 3);
-    assertDNSQuery("appmaster-ipc-api.test1.root.dev.test.", Type.TXT, 1);
-    assertDNSQuery("http-api.test1.root.dev.test.", Type.TXT, 1);
-    assertDNSQuery("management-api.test1.root.dev.test.", Type.TXT, 1);
-  }
-
-  @Test
-  public void testContainerRegistration() throws Exception {
-    ServiceRecord record = getMarshal().fromBytes("somepath",
-        CONTAINER_RECORD.getBytes());
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000002",
-        record);
-
-    // start assessing whether correct records are available
-    Record[] recs =
-        assertDNSQuery("ctr-e50-1451931954322-0016-01-000002.dev.test.");
-    assertEquals("wrong result", "172.17.0.19",
-        ((ARecord) recs[0]).getAddress().getHostAddress());
-
-    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1);
-    assertTrue("not an ARecord", recs[0] instanceof ARecord);
-  }
-
-  @Test
-  public void testContainerRegistrationPersistanceAbsent() throws Exception {
-    ServiceRecord record = marshal.fromBytes("somepath",
-        CONTAINER_RECORD_YARN_PERSISTANCE_ABSENT.getBytes());
-    registryDNS.register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000003",
-         record);
-
-    Name name =
-        Name.fromString("ctr-e50-1451931954322-0016-01-000002.dev.test.");
-    Record question = Record.newRecord(name, Type.A, DClass.IN);
-    Message query = Message.newQuery(question);
-    byte[] responseBytes = registryDNS.generateReply(query, null);
-    Message response = new Message(responseBytes);
-    assertEquals("Excepting NXDOMAIN as Record must not have regsisterd wrong",
-        Rcode.NXDOMAIN, response.getRcode());
-  }
-
-  @Test
-  public void testRecordTTL() throws Exception {
-    ServiceRecord record = getMarshal().fromBytes("somepath",
-        CONTAINER_RECORD.getBytes());
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000002",
-        record);
-
-    // start assessing whether correct records are available
-    Record[] recs = assertDNSQuery(
-        "ctr-e50-1451931954322-0016-01-000002.dev.test.");
-    assertEquals("wrong result", "172.17.0.19",
-        ((ARecord) recs[0]).getAddress().getHostAddress());
-    assertEquals("wrong ttl", 30L, recs[0].getTTL());
-
-    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1);
-    assertTrue("not an ARecord", recs[0] instanceof ARecord);
-
-    assertEquals("wrong ttl", 30L, recs[0].getTTL());
-  }
-
-  @Test
-  public void testReverseLookup() throws Exception {
-    ServiceRecord record = getMarshal().fromBytes("somepath",
-        CONTAINER_RECORD.getBytes());
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000002",
-        record);
-
-    // start assessing whether correct records are available
-    Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
-    assertEquals("wrong result",
-        "httpd-1.test1.root.dev.test.",
-        ((PTRRecord) recs[0]).getTarget().toString());
-  }
-
-  @Test
-  public void testReverseLookupInLargeNetwork() throws Exception {
-    setRegistryDNS(new RegistryDNS("TestRegistry"));
-    Configuration conf = createConfiguration();
-    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "dev.test");
-    conf.set(KEY_DNS_ZONE_SUBNET, "172.17.0.0");
-    conf.set(KEY_DNS_ZONE_MASK, "255.255.224.0");
-    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
-
-    getRegistryDNS().setDomainName(conf);
-    getRegistryDNS().initializeZones(conf);
-
-    ServiceRecord record = getMarshal().fromBytes("somepath",
-        CONTAINER_RECORD.getBytes());
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000002",
-        record);
-
-    // start assessing whether correct records are available
-    Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
-    assertEquals("wrong result",
-        "httpd-1.test1.root.dev.test.",
-        ((PTRRecord) recs[0]).getTarget().toString());
-  }
-
-  @Test
-  public void testMissingReverseLookup() throws Exception {
-    ServiceRecord record = getMarshal().fromBytes("somepath",
-        CONTAINER_RECORD.getBytes());
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000002",
-        record);
-
-    // start assessing whether correct records are available
-    Name name = Name.fromString("19.1.17.172.in-addr.arpa.");
-    Record question = Record.newRecord(name, Type.PTR, DClass.IN);
-    Message query = Message.newQuery(question);
-    OPTRecord optRecord = new OPTRecord(4096, 0, 0, Flags.DO, null);
-    query.addRecord(optRecord, Section.ADDITIONAL);
-    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
-    Message response = new Message(responseBytes);
-    assertEquals("Missing record should be: ", Rcode.NXDOMAIN,
-        response.getRcode());
-  }
-
-  @Test
-  public void testNoContainerIP() throws Exception {
-    ServiceRecord record = getMarshal().fromBytes("somepath",
-        CONTAINER_RECORD_NO_IP.getBytes());
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000002",
-        record);
-
-    // start assessing whether correct records are available
-    Name name =
-        Name.fromString("ctr-e50-1451931954322-0016-01-000002.dev.test.");
-    Record question = Record.newRecord(name, Type.A, DClass.IN);
-    Message query = Message.newQuery(question);
-
-    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
-    Message response = new Message(responseBytes);
-    assertEquals("wrong status", Rcode.NXDOMAIN, response.getRcode());
-  }
-
-  private Record[] assertDNSQuery(String lookup) throws IOException {
-    return assertDNSQuery(lookup, Type.A, 1);
-  }
-
-  private Record[] assertDNSQuery(String lookup, int numRecs)
-      throws IOException {
-    return assertDNSQuery(lookup, Type.A, numRecs);
-  }
-
-  Record[] assertDNSQuery(String lookup, int type, int numRecs)
-      throws IOException {
-    Name name = Name.fromString(lookup);
-    Record question = Record.newRecord(name, type, DClass.IN);
-    Message query = Message.newQuery(question);
-    OPTRecord optRecord = new OPTRecord(4096, 0, 0, Flags.DO, null);
-    query.addRecord(optRecord, Section.ADDITIONAL);
-    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
-    Message response = new Message(responseBytes);
-    assertEquals("not successful", Rcode.NOERROR, response.getRcode());
-    assertNotNull("Null response", response);
-    assertEquals("Questions do not match", query.getQuestion(),
-        response.getQuestion());
-    Record[] recs = response.getSectionArray(Section.ANSWER);
-    assertEquals("wrong number of answer records",
-        isSecure() ? numRecs * 2 : numRecs, recs.length);
-    if (isSecure()) {
-      boolean signed = false;
-      for (Record record : recs) {
-        signed = record.getType() == Type.RRSIG;
-        if (signed) {
-          break;
-        }
-      }
-      assertTrue("No signatures found", signed);
-    }
-    return recs;
-  }
-
-  Record[] assertDNSQueryNotNull(String lookup, int type, int answerCount)
-      throws IOException {
-    Name name = Name.fromString(lookup);
-    Record question = Record.newRecord(name, type, DClass.IN);
-    Message query = Message.newQuery(question);
-    OPTRecord optRecord = new OPTRecord(4096, 0, 0, Flags.DO, null);
-    query.addRecord(optRecord, Section.ADDITIONAL);
-    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
-    Message response = new Message(responseBytes);
-    assertEquals("not successful", Rcode.NOERROR, response.getRcode());
-    assertNotNull("Null response", response);
-    assertEquals("Questions do not match", query.getQuestion(),
-        response.getQuestion());
-    Record[] recs = response.getSectionArray(Section.ANSWER);
-    assertEquals(answerCount, recs.length);
-    assertEquals(recs[0].getType(), type);
-    return recs;
-  }
-
-  @Test
-  public void testDNSKEYRecord() throws Exception {
-    String publicK =
-        "AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD "
-            + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ "
-            + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q "
-            + "l9Ozs5bV";
-    //    byte[] publicBytes = Base64.decodeBase64(publicK);
-    //    X509EncodedKeySpec keySpec = new X509EncodedKeySpec(publicBytes);
-    //    KeyFactory keyFactory = KeyFactory.getInstance("RSA");
-    //    PublicKey pubKey = keyFactory.generatePublic(keySpec);
-    DNSKEYRecord dnskeyRecord =
-        new DNSKEYRecord(Name.fromString("dev.test."), DClass.IN, 0,
-            DNSKEYRecord.Flags.ZONE_KEY,
-            DNSKEYRecord.Protocol.DNSSEC,
-            DNSSEC.Algorithm.RSASHA256,
-            Base64.decodeBase64(publicK.getBytes()));
-    assertNotNull(dnskeyRecord);
-    RSAPrivateKeySpec privateSpec = new RSAPrivateKeySpec(new BigInteger(1,
-        Base64.decodeBase64(
-            "7Ul6/QDPWSGVAK9/Se53X8I0dDDA8S7wE1yFm2F0PEo9Wfb3KsMIegBaPCIaw5LDd"
-                + "LMg+trBJsfPImyOfSgsGEasfpB50UafJ2jGM2zDeb9IKY6NH9rssYEAwMUq"
-                + "oWKiLiA7K43rqy8F5j7/m7Dvb7R6L0BDbSCp/qqX07OzltU=")),
-        new BigInteger(1, Base64.decodeBase64(
-            "MgbQ6DBYhskeufNGGdct0cGG/4wb0X183ggenwCv2dopDyOTPq+5xMb4Pz9Ndzgk/"
-                + "yCY7mpaWIu9rttGOzrR+LBRR30VobPpMK1bMnzu2C0x08oYAguVwZB79DLC"
-                + "705qmZpiaaFB+LnhG7VtpPiOBm3UzZxdrBfeq/qaKrXid60=")));
-    KeyFactory factory = KeyFactory.getInstance("RSA");
-    PrivateKey priv = factory.generatePrivate(privateSpec);
-
-    ARecord aRecord = new ARecord(Name.fromString("some.test."), DClass.IN, 0,
-        InetAddress.getByName("192.168.0.1"));
-    Calendar cal = Calendar.getInstance();
-    Date inception = cal.getTime();
-    cal.add(Calendar.YEAR, 1);
-    Date expiration = cal.getTime();
-    RRset rrset = new RRset(aRecord);
-    RRSIGRecord rrsigRecord = DNSSEC.sign(rrset,
-        dnskeyRecord,
-        priv,
-        inception,
-        expiration);
-    DNSSEC.verify(rrset, rrsigRecord, dnskeyRecord);
-
-  }
-
-  @Test
-  public void testIpv4toIpv6() throws Exception {
-    InetAddress address =
-        BaseServiceRecordProcessor
-            .getIpv6Address(InetAddress.getByName("172.17.0.19"));
-    assertTrue("not an ipv6 address", address instanceof Inet6Address);
-    assertEquals("wrong IP", "172.17.0.19",
-        InetAddress.getByAddress(address.getAddress()).getHostAddress());
-  }
-
-  @Test
-  public void testAAAALookup() throws Exception {
-    ServiceRecord record = getMarshal().fromBytes("somepath",
-        CONTAINER_RECORD.getBytes());
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000002",
-        record);
-
-    // start assessing whether correct records are available
-    Record[] recs = assertDNSQuery(
-        "ctr-e50-1451931954322-0016-01-000002.dev.test.", Type.AAAA, 1);
-    assertEquals("wrong result", "172.17.0.19",
-        ((AAAARecord) recs[0]).getAddress().getHostAddress());
-
-    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", Type.AAAA, 1);
-    assertTrue("not an ARecord", recs[0] instanceof AAAARecord);
-  }
-
-  @Test
-  public void testNegativeLookup() throws Exception {
-    ServiceRecord record = getMarshal().fromBytes("somepath",
-        CONTAINER_RECORD.getBytes());
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000002",
-        record);
-
-    // start assessing whether correct records are available
-    Name name = Name.fromString("missing.dev.test.");
-    Record question = Record.newRecord(name, Type.A, DClass.IN);
-    Message query = Message.newQuery(question);
-
-    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
-    Message response = new Message(responseBytes);
-    assertEquals("not successful", Rcode.NXDOMAIN, response.getRcode());
-    assertNotNull("Null response", response);
-    assertEquals("Questions do not match", query.getQuestion(),
-        response.getQuestion());
-    Record[] sectionArray = response.getSectionArray(Section.AUTHORITY);
-    assertEquals("Wrong number of recs in AUTHORITY", isSecure() ? 2 : 1,
-        sectionArray.length);
-    boolean soaFound = false;
-    for (Record rec : sectionArray) {
-      soaFound = rec.getType() == Type.SOA;
-      if (soaFound) {
-        break;
-      }
-    }
-    assertTrue("wrong record type",
-        soaFound);
-
-  }
-
-  @Test
-  public void testReadMasterFile() throws Exception {
-    setRegistryDNS(new RegistryDNS("TestRegistry"));
-    Configuration conf = new Configuration();
-    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "dev.test");
-    conf.set(RegistryConstants.KEY_DNS_ZONE_SUBNET, "172.17.0");
-    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
-    conf.set(RegistryConstants.KEY_DNS_ZONES_DIR,
-        getClass().getResource("/").getFile());
-    if (isSecure()) {
-      conf.setBoolean(RegistryConstants.KEY_DNSSEC_ENABLED, true);
-      conf.set(RegistryConstants.KEY_DNSSEC_PUBLIC_KEY,
-          "AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD "
-              + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ "
-              + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q "
-              + "l9Ozs5bV");
-      conf.set(RegistryConstants.KEY_DNSSEC_PRIVATE_KEY_FILE,
-          getClass().getResource("/test.private").getFile());
-    }
-
-    getRegistryDNS().setDomainName(conf);
-    getRegistryDNS().initializeZones(conf);
-
-    ServiceRecord record = getMarshal().fromBytes("somepath",
-        CONTAINER_RECORD.getBytes());
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000002",
-        record);
-
-    // start assessing whether correct records are available
-    Record[] recs =
-        assertDNSQuery("ctr-e50-1451931954322-0016-01-000002.dev.test.");
-    assertEquals("wrong result", "172.17.0.19",
-        ((ARecord) recs[0]).getAddress().getHostAddress());
-
-    recs = assertDNSQuery("httpd-1.test1.root.dev.test.", 1);
-    assertTrue("not an ARecord", recs[0] instanceof ARecord);
-
-    // lookup dyanmic reverse records
-    recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
-    assertEquals("wrong result",
-        "httpd-1.test1.root.dev.test.",
-        ((PTRRecord) recs[0]).getTarget().toString());
-
-    // now lookup static reverse records
-    Name name = Name.fromString("5.0.17.172.in-addr.arpa.");
-    Record question = Record.newRecord(name, Type.PTR, DClass.IN);
-    Message query = Message.newQuery(question);
-    OPTRecord optRecord = new OPTRecord(4096, 0, 0, Flags.DO, null);
-    query.addRecord(optRecord, Section.ADDITIONAL);
-    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
-    Message response = new Message(responseBytes);
-    recs = response.getSectionArray(Section.ANSWER);
-    assertEquals("wrong result", "cn005.dev.test.",
-        ((PTRRecord) recs[0]).getTarget().toString());
-  }
-
-  @Test
-  public void testReverseZoneNames() throws Exception {
-    Configuration conf = new Configuration();
-    conf.set(KEY_DNS_ZONE_SUBNET, "172.26.32.0");
-    conf.set(KEY_DNS_ZONE_MASK, "255.255.224.0");
-
-    Name name = getRegistryDNS().getReverseZoneName(conf);
-    assertEquals("wrong name", "26.172.in-addr.arpa.", name.toString());
-  }
-
-  @Test
-  public void testSplitReverseZoneNames() throws Exception {
-    Configuration conf = new Configuration();
-    registryDNS = new RegistryDNS("TestRegistry");
-    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "example.com");
-    conf.set(KEY_DNS_SPLIT_REVERSE_ZONE, "true");
-    conf.set(KEY_DNS_SPLIT_REVERSE_ZONE_RANGE, "256");
-    conf.set(KEY_DNS_ZONE_SUBNET, "172.26.32.0");
-    conf.set(KEY_DNS_ZONE_MASK, "255.255.224.0");
-    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
-    conf.set(RegistryConstants.KEY_DNS_ZONES_DIR,
-        getClass().getResource("/").getFile());
-    if (isSecure()) {
-      conf.setBoolean(RegistryConstants.KEY_DNSSEC_ENABLED, true);
-      conf.set(RegistryConstants.KEY_DNSSEC_PUBLIC_KEY,
-          "AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD "
-              + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ "
-              + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q "
-              + "l9Ozs5bV");
-      conf.set(RegistryConstants.KEY_DNSSEC_PRIVATE_KEY_FILE,
-          getClass().getResource("/test.private").getFile());
-    }
-    registryDNS.setDomainName(conf);
-    registryDNS.setDNSSECEnabled(conf);
-    registryDNS.addSplitReverseZones(conf, 4);
-    assertEquals(4, registryDNS.getZoneCount());
-  }
-
-  @Test
-  public void testExampleDotCom() throws Exception {
-    Name name = Name.fromString("example.com.");
-    Record[] records = getRegistryDNS().getRecords(name, Type.SOA);
-    assertNotNull("example.com exists:", records);
-  }
-
-  @Test
-  public void testExternalCNAMERecord() throws Exception {
-    setRegistryDNS(new RegistryDNS("TestRegistry"));
-    Configuration conf = new Configuration();
-    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "dev.test");
-    conf.set(RegistryConstants.KEY_DNS_ZONE_SUBNET, "172.17.0");
-    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
-    conf.set(RegistryConstants.KEY_DNS_ZONES_DIR,
-        getClass().getResource("/").getFile());
-    getRegistryDNS().setDomainName(conf);
-    getRegistryDNS().initializeZones(conf);
-
-    // start assessing whether correct records are available
-    Record[] recs =
-        assertDNSQueryNotNull("mail.yahoo.com.", Type.CNAME, 1);
-  }
-
-  @Test
-  public void testRootLookup() throws Exception {
-    setRegistryDNS(new RegistryDNS("TestRegistry"));
-    Configuration conf = new Configuration();
-    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "dev.test");
-    conf.set(RegistryConstants.KEY_DNS_ZONE_SUBNET, "172.17.0");
-    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
-    conf.set(RegistryConstants.KEY_DNS_ZONES_DIR,
-        getClass().getResource("/").getFile());
-    getRegistryDNS().setDomainName(conf);
-    getRegistryDNS().initializeZones(conf);
-
-    // start assessing whether correct records are available
-    Record[] recs =
-        assertDNSQueryNotNull(".", Type.NS, 13);
-  }
-
-  @Test
-  public void testMultiARecord() throws Exception {
-    ServiceRecord record = getMarshal().fromBytes("somepath",
-        CONTAINER_RECORD.getBytes());
-    ServiceRecord record2 = getMarshal().fromBytes("somepath",
-        CONTAINER_RECORD2.getBytes());
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000002",
-        record);
-    getRegistryDNS().register(
-        "/registry/users/root/services/org-apache-slider/test1/components/"
-            + "ctr-e50-1451931954322-0016-01-000003",
-        record2);
-
-    // start assessing whether correct records are available
-    Record[] recs =
-        assertDNSQuery("httpd.test1.root.dev.test.", 2);
-    assertTrue("not an ARecord", recs[0] instanceof ARecord);
-    assertTrue("not an ARecord", recs[1] instanceof ARecord);
-  }
-
-  @Test(timeout=5000)
-  public void testUpstreamFault() throws Exception {
-    Name name = Name.fromString("19.0.17.172.in-addr.arpa.");
-    Record[] recs = getRegistryDNS().getRecords(name, Type.CNAME);
-    assertNull("Record is not null", recs);
-  }
-
-  public RegistryDNS getRegistryDNS() {
-    return registryDNS;
-  }
-
-  public void setRegistryDNS(
-      RegistryDNS registryDNS) {
-    this.registryDNS = registryDNS;
-  }
-
-  public RegistryUtils.ServiceRecordMarshal getMarshal() {
-    return marshal;
-  }
-
-  public void setMarshal(
-      RegistryUtils.ServiceRecordMarshal marshal) {
-    this.marshal = marshal;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestReverseZoneUtils.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestReverseZoneUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestReverseZoneUtils.java
deleted file mode 100644
index 1331f75..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestReverseZoneUtils.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.registry.server.dns;
-
-import java.net.UnknownHostException;
-import static org.junit.Assert.assertEquals;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-/**
- * Tests for the reverse zone utilities.
- */
-public class TestReverseZoneUtils {
-  private static final String NET = "172.17.4.0";
-  private static final int RANGE = 256;
-  private static final int INDEX = 0;
-
-  @Rule public ExpectedException exception = ExpectedException.none();
-
-  @Test
-  public void testGetReverseZoneNetworkAddress() throws Exception {
-    assertEquals("172.17.4.0",
-        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, RANGE, INDEX));
-  }
-
-  @Test
-  public void testSplitIp() throws Exception {
-    long[] splitIp = ReverseZoneUtils.splitIp(NET);
-    assertEquals(172, splitIp[0]);
-    assertEquals(17, splitIp[1]);
-    assertEquals(4, splitIp[2]);
-    assertEquals(0, splitIp[3]);
-  }
-
-  @Test
-  public void testThrowIllegalArgumentExceptionIfIndexIsNegative()
-      throws Exception {
-    exception.expect(IllegalArgumentException.class);
-    ReverseZoneUtils.getReverseZoneNetworkAddress(NET, RANGE, -1);
-  }
-
-  @Test
-  public void testThrowUnknownHostExceptionIfIpIsInvalid() throws Exception {
-    exception.expect(UnknownHostException.class);
-    ReverseZoneUtils
-        .getReverseZoneNetworkAddress("213124.21231.14123.13", RANGE, INDEX);
-  }
-
-  @Test
-  public void testThrowIllegalArgumentExceptionIfRangeIsNegative()
-      throws Exception {
-    exception.expect(IllegalArgumentException.class);
-    ReverseZoneUtils.getReverseZoneNetworkAddress(NET, -1, INDEX);
-  }
-
-  @Test
-  public void testVariousRangeAndIndexValues() throws Exception {
-    // Given the base address of 172.17.4.0, step 256 IP addresses, 5 times.
-    assertEquals("172.17.9.0",
-        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 256, 5));
-    assertEquals("172.17.4.128",
-        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 128, 1));
-    assertEquals("172.18.0.0",
-        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 256, 252));
-    assertEquals("172.17.12.0",
-        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1024, 2));
-    assertEquals("172.17.4.0",
-        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 0, 1));
-    assertEquals("172.17.4.0",
-        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1, 0));
-    assertEquals("172.17.4.1",
-        ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1, 1));
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java
deleted file mode 100644
index ded63bd..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.registry.server.dns;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.registry.client.api.RegistryConstants;
-
-/**
- *
- */
-public class TestSecureRegistryDNS extends TestRegistryDNS {
-  @Override protected Configuration createConfiguration() {
-    Configuration conf = super.createConfiguration();
-    conf.setBoolean(RegistryConstants.KEY_DNSSEC_ENABLED, true);
-    conf.set(RegistryConstants.KEY_DNSSEC_PUBLIC_KEY,
-        "AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD "
-            + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ "
-            + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q "
-            + "l9Ozs5bV");
-    conf.set(RegistryConstants.KEY_DNSSEC_PRIVATE_KEY_FILE,
-        getClass().getResource("/test.private").getFile());
-
-    return conf;
-  }
-
-  @Override protected boolean isSecure() {
-    return true;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/0.17.172.in-addr.arpa.zone
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/0.17.172.in-addr.arpa.zone
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/0.17.172.in-addr.arpa.zone
deleted file mode 100644
index 08071e2..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/0.17.172.in-addr.arpa.zone
+++ /dev/null
@@ -1,36 +0,0 @@
-;
-; Licensed to the Apache Software Foundation (ASF) under one
-; or more contributor license agreements.  See the NOTICE file
-; distributed with this work for additional information
-; regarding copyright ownership.  The ASF licenses this file
-; to you under the Apache License, Version 2.0 (the
-; "License"); you may not use this file except in compliance
-; with the License.  You may obtain a copy of the License at
-;
-;      http://www.apache.org/licenses/LICENSE-2.0
-;
-; Unless required by applicable law or agreed to in writing, software
-; distributed under the License is distributed on an "AS IS" BASIS,
-; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-; See the License for the specific language governing permissions and
-; limitations under the License.
-;
-;
-$ORIGIN .
-$TTL 1800 ; 30 minutes
-0.17.172.in-addr.arpa IN SOA ns.hwhq.hortonworks.com. it.hortonworks.com. (
-  2015081000 ; serial
-  10800      ; refresh (3 hours)
-  900        ; retry (15 minutes)
-  1814400    ; expire (3 weeks)
-  10800      ; minimum (3 hours)
-)
- NS ns.hwhq.hortonworks.com.
- NS ns2.hwhq.hortonworks.com.
-
-$ORIGIN 0.17.172.in-addr.arpa.
-5  PTR  cn005.dev.test.
-6  PTR  cn006.dev.test.
-7  PTR  cn007.dev.test.
-8  PTR  cn008.dev.test.
-9  PTR  cn009.dev.test.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/log4j.properties
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/log4j.properties
deleted file mode 100644
index bed1abc..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,63 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
-# log4j configuration used during build and unit tests
-
-log4j.rootLogger=INFO,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} 
(%F:%M(%L)) - %m%n
-
-log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
-log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
-log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
-
-# packages under test
-log4j.logger.org.apache.hadoop.yarn.registry=DEBUG
-log4j.logger.org.apache.hadoop.service=DEBUG
-
-log4j.logger.org.apache.hadoop.security.UserGroupInformation=DEBUG
-
-
-#crank back on some noise
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
-log4j.logger.org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner=WARN
-log4j.logger.org.apache.hadoop.hdfs.server.blockmanagement=WARN
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
-log4j.logger.org.apache.hadoop.hdfs=WARN
-
-
-log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
-log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
-log4j.logger.org.apache.zookeeper=INFO
-log4j.logger.org.apache.zookeeper.ClientCnxn=DEBUG
-
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.security=WARN
-log4j.logger.org.apache.hadoop.metrics2=ERROR
-log4j.logger.org.apache.hadoop.util.HostsFileReader=WARN
-log4j.logger.org.apache.hadoop.yarn.event.AsyncDispatcher=WARN
-log4j.logger.org.apache.hadoop.security.token.delegation=WARN
-log4j.logger.org.apache.hadoop.yarn.util.AbstractLivelinessMonitor=WARN
-log4j.logger.org.apache.hadoop.yarn.server.nodemanager.security=WARN
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMNMInfo=WARN
-
-# curator noise
-log4j.logger.org.apache.curator.framework.imps=WARN
-log4j.logger.org.apache.curator.framework.state.ConnectionStateManager=ERROR
-
-log4j.logger.org.apache.directory.api.ldap=ERROR
-log4j.logger.org.apache.directory.server=ERROR
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/test.private
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/test.private
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/test.private
deleted file mode 100644
index 5f0da9d..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/test.private
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-Private-key-format: v1.3
-Algorithm: 8 (RSASHA256)
-Modulus: 
7Ul6/QDPWSGVAK9/Se53X8I0dDDA8S7wE1yFm2F0PEo9Wfb3KsMIegBaPCIaw5LDdLMg+trBJsfPImyOfSgsGEasfpB50UafJ2jGM2zDeb9IKY6NH9rssYEAwMUqoWKiLiA7K43rqy8F5j7/m7Dvb7R6L0BDbSCp/qqX07OzltU=
-PublicExponent: AQAB
-PrivateExponent: 
MgbQ6DBYhskeufNGGdct0cGG/4wb0X183ggenwCv2dopDyOTPq+5xMb4Pz9Ndzgk/yCY7mpaWIu9rttGOzrR+LBRR30VobPpMK1bMnzu2C0x08oYAguVwZB79DLC705qmZpiaaFB+LnhG7VtpPiOBm3UzZxdrBfeq/qaKrXid60=
-Prime1: 
/HFdjI4cRuJBjK9IGWWmmVZWwaFsQYO9GHLCDwjm691GxaDpXuMdPd0uH9EqQvskyF8JPmzQXI43swyUFjizow==
-Prime2: 
8KFxkWEHlhgB2GLi8tk39TKY5vmFUvh4FO28COl1N/rWjKVpfM1p6HQ6YavoGNZQmDBazv4WOZRqSQukHApzJw==
-Exponent1: 
alX+h/RcqOcpoW88OaZ99N1PkiTDCx3JC4FbiSXAz93Xr+vGIfgdGzAN+80JtklABz8xD6CabEJj6AIGZw3fbQ==
-Exponent2: 
vvPusqZkJcjBVh0K6hpUXKEdU1W5ZmFEsZ8Cs7PH0Hee4Je3QVGk9NGfLrkDgwo3hL4CofZiXqkXOwYg4husyw==
-Coefficient: 
omxpbNU6u/swbnkTC6MicaDqbJP7ETnCCJ1iN2+HZO/AlQCFlqVzLwGZmvGMAGA9ZWF+YpqpPhvzi4bWmi5XrQ==
-Created: 20160119155251
-Publish: 20160119155251
-Activate: 20160119155251
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 18edc53..45cc5ea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -68,7 +68,7 @@
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-registry</artifactId>
+      <artifactId>hadoop-registry</artifactId>
     </dependency>
 
     <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 0bda0c7..6242215 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -53,7 +53,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-registry</artifactId>
+      <artifactId>hadoop-registry</artifactId>
     </dependency>
     <dependency>
       <groupId>javax.xml.bind</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/index.md
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/index.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/index.md
deleted file mode 100644
index 0a127cd..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/index.md
+++ /dev/null
@@ -1,30 +0,0 @@
-<!---
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# YARN Service Registry
-
-The Service registry is a service which can be deployed in a Hadoop cluster
-to allow deployed applications to register themselves and the means of
-communicating with them. Client applications can then locate services
-and use the binding information to connect with the services's 
network-accessible
-endpoints, be they REST, IPC, Web UI, Zookeeper quorum+path or some other 
protocol.
-Currently, all the registry data is stored in a zookeeper cluster.
-
-* [Architecture](yarn-registry.html)
-* [Configuration](registry-configuration.html)
-* [Using the YARN Service registry](using-the-yarn-service-registry.html)
-* [Security](registry-security.html)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a9fa84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
deleted file mode 100644
index 1d03f8d..0000000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md
+++ /dev/null
@@ -1,397 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# Registry Configuration
-
-
-The YARN service registry is built on top of Apache Zookeeper.
-It is configured by way of a Hadoop `Configuration` class:
-the instance used to create the service controls the behavior of the client.
-
-This document lists the configuration parameters which control the
-registry client and its deployment in the YARN Resource Manager.
-
-The default values of all these settings are defined in `core-default.xml`.
-The values in this file may not match those listed in this document.
-If this is the case, the values in `core-default.xml` MUST be considered 
normative.
-
-## Changing the configuration values
-
-Changes to the configuration values SHOULD be done in `core-site.xml`.
-This will ensure that client and non-YARN applications will pick up
-the values, so enabling them to read from and potentially write to the 
registry.
-
-
-## Core Settings
-
-
-### Setting the Zookeeper Quorum: `hadoop.registry.zk.quorum`
-
-This is an essential setting: it identifies the lists of zookeeper hosts
-and the ports on which the ZK services are listening.
-
-
-```
-  <property>
-    <description>
-      A comma separated list of hostname:port pairs defining the
-      zookeeper quorum binding for the registry
-    </description>
-    <name>hadoop.registry.zk.quorum</name>
-    <value>localhost:2181</value>
-  </property>
-```
-
-It takes a comma-separated list, such as `zk1:2181 ,zk2:2181, zk3:2181`
-
-### Setting the Zookeeper Registry Base path: `hadoop.registry.zk.root`
-
-This path sets the base zookeeper node for the registry
-
-```
-  <property>
-    <description>
-      The root zookeeper node for the registry
-    </description>
-    <name>hadoop.registry.zk.root</name>
-    <value>/registry</value>
-  </property>
-```
-
-The default value of `/registry` is normally sufficient. A different value
-may be needed for security reasons or because the `/registry` path is in use.
-
-The root value is prepended to all registry paths so as to create the absolute
-path. For example:
-
-* `/` maps to `/registry`
-* `/services` maps to `/registry/services`
-* `/users/yarn` maps to `/registry/users/yarn`
-
-A different value of `hadoop.registry.zk.root` would result in a different
-mapping to absolute zookeeper paths.
-
-
-## Security Options
-
-Registry security is enabled when the property `hadoop.registry.secure`
-is set to `true`. Once set, nodes are created with permissions, so that
-only a specific user *and the configured cluster "superuser" accounts*
-can write under their home path of `${hadoop.registry.zk.root}/users`.
-Only the superuser accounts
-will be able to manipulate the root path, including 
`${hadoop.registry.zk.root}/services`
-and `${hadoop.registry.zk.root}/users`.
-
-All write operations on the registry (including deleting entries and paths)
-must be authenticated. Read operations are still permitted by unauthenticated
-callers.
-
-The key settings for secure registry support are:
-
-* enabling the secure mode:  `hadoop.registry.secure`
-* listing the superuser zookeeper ACLs:  `hadoop.registry.system.acls`
-* listing the kerberos realm for the principals: 
`hadoop.registry.kerberos.realm`
-* identifying the JAAS context within the JAAS configuration which defines
-the user: `hadoop.registry.jaas.context`
-
-
-### Enabling security
-
-```
-  <property>
-    <description>
-      Key to set if the registry is secure. Turning it on
-      changes the permissions policy from "open access"
-      to restrictions on kerberos with the option of
-      a user adding one or more auth key pairs down their
-      own tree.
-    </description>
-    <name>hadoop.registry.secure</name>
-    <value>false</value>
-  </property>
-```
-
-### Identifying the client JAAS context
-
-The registry clients must identify the JAAS context which they use
-to authenticate to the registry.
-
-```
-  <property>
-    <description>
-      Key to define the JAAS context. Used in secure mode
-    </description>
-    <name>hadoop.registry.jaas.context</name>
-    <value>Client</value>
-  </property>
-```
-
-*Note* as the Resource Manager is simply another client of the registry, it
-too must have this context defined.
-
-
-### Identifying the system accounts `hadoop.registry.system.acls`
-
-These are the the accounts which are given full access to the base of the
-registry. The Resource Manager needs this option to create the root paths.
-
-Client applications writing to the registry access to the nodes it creates.
-
-1. The property `hadoop.registry.system.acls` takes a comma-separated list
-of zookeeper `ACLs` which are given full access to created nodes; the 
permissions
-`READ | WRITE | CREATE | DELETE | ADMIN`.
-2. Any zookeeper ACL scheme may be added to this, such as the `digest:` scheme.
-3. The SASL scheme, `sasl:`, is used to identify which callers identified
-by sasl have full access. These are the superuser accounts.
-4. They may be identified by elements such as `sasl:[email protected]`.
-5. To aid portability of SASL settings, especially the default value,
-any `sasl:` entry without the realm value —that
-is, any entry that terminates in the `@` symbol— has the current realm 
appended
-to it.
-6. This realm is set to that of the current user.
-7. It may be overridden by the property `hadoop.registry.kerberos.realm`.
-
-
-```
-  <property>
-    <description>
-      A comma separated list of Zookeeper ACL identifiers with
-      system access to the registry in a secure cluster.
-      These are given full access to all entries.
-      If there is an "@" at the end of a SASL entry it
-      instructs the registry client to append the default kerberos domain.
-    </description>
-    <name>hadoop.registry.system.acls</name>
-    <value>sasl:yarn@, sasl:mapred@, sasl:mapred@, sasl:hdfs@</value>
-  </property>
-
-  <property>
-    <description>
-      The kerberos realm: used to set the realm of
-      system principals which do not declare their realm,
-      and any other accounts that need the value.
-      If empty, the default realm of the running process
-      is used.
-      If neither are known and the realm is needed, then the registry
-      service/client will fail.
-    </description>
-    <name>hadoop.registry.kerberos.realm</name>
-    <value></value>
-  </property>
-```
-
-Example: an `hadoop.registry.system.acls` entry of
- `sasl:yarn@, sasl:[email protected], sasl:system@REALM2`,
-would, in a YARN cluster with the realm `EXAMPLE.COM`, add the following
-admin accounts to every node
-
-* `sasl:[email protected]`
-* `sasl:[email protected]`
-* `sasl:system@REALM2`
-
-The identity of a client application creating registry entries will be
-automatically included in the permissions of all entries created.
-If, for example, the account creating an entry was `hbase`, another
-entry would be created
-
-* `sasl:[email protected]`
-
-
-**Important**: when setting the system ACLS, *it is critical to include the
-identity of the YARN Resource Manager*.
-
-The RM needs to be able to create the root and user paths, and delete service
-records during application and container cleanup.
-
-
-## Zookeeper connection management options
-
-Some low level options manage the ZK connection —more specifically, its 
failure
-handling.
-
-The Zookeeper registry clients use Apache Curator to connect to Zookeeper,
-a library which detects timeouts and attempts to reconnect to one of the
-servers which forms the zookeeper quorum. It is only after a timeout is 
detected
-that a retry is triggered.
-
-```
-  <property>
-    <description>
-      Zookeeper session timeout in milliseconds
-    </description>
-    <name>hadoop.registry.zk.session.timeout.ms</name>
-    <value>60000</value>
-  </property>
-
-  <property>
-    <description>
-      Zookeeper connection timeout in milliseconds
-    </description>
-    <name>hadoop.registry.zk.connection.timeout.ms</name>
-    <value>15000</value>
-  </property>
-
-  <property>
-    <description>
-      Zookeeper connection retry count before failing
-    </description>
-    <name>hadoop.registry.zk.retry.times</name>
-    <value>5</value>
-  </property>
-
-  <property>
-    <description>
-    </description>
-    <name>hadoop.registry.zk.retry.interval.ms</name>
-    <value>1000</value>
-  </property>
-
-  <property>
-    <description>
-      Zookeeper retry limit in milliseconds, during
-      exponential backoff.
-      This places a limit even
-      if the retry times and interval limit, combined
-      with the backoff policy, result in a long retry
-      period
-    </description>
-    <name>hadoop.registry.zk.retry.ceiling.ms</name>
-    <value>60000</value>
-  </property>
-```
-
-The retry strategy used in the registry client is
-[`BoundedExponentialBackoffRetry`](https://curator.apache.org/apidocs/org/apache/curator/retry/BoundedExponentialBackoffRetry.html):
-This backs off exponentially on connection failures before eventually
-concluding that the quorum is unreachable and failing.
-
-## Complete Set of Configuration Options
-
-```
-  <!-- YARN registry -->
-
-  <property>
-    <description>
-      A comma separated list of hostname:port pairs defining the
-      zookeeper quorum binding for the registry
-    </description>
-    <name>hadoop.registry.zk.quorum</name>
-    <value>localhost:2181</value>
-  </property>
-
-  <property>
-    <description>
-      The root zookeeper node for the registry
-    </description>
-    <name>hadoop.registry.zk.root</name>
-    <value>/registry</value>
-  </property>
-
-  <property>
-    <description>
-      Key to set if the registry is secure. Turning it on
-      changes the permissions policy from "open access"
-      to restrictions on kerberos with the option of
-      a user adding one or more auth key pairs down their
-      own tree.
-    </description>
-    <name>hadoop.registry.secure</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <description>
-      A comma separated list of Zookeeper ACL identifiers with
-      system access to the registry in a secure cluster.
-
-      These are given full access to all entries.
-
-      If there is an "@" at the end of a SASL entry it
-      instructs the registry client to append the default kerberos domain.
-    </description>
-    <name>hadoop.registry.system.acls</name>
-    <value>sasl:yarn@, sasl:mapred@, sasl:mapred@, sasl:hdfs@</value>
-  </property>
-
-  <property>
-    <description>
-      The kerberos realm: used to set the realm of
-      system principals which do not declare their realm,
-      and any other accounts that need the value.
-
-      If empty, the default realm of the running process
-      is used.
-
-      If neither are known and the realm is needed, then the registry
-      service/client will fail.
-    </description>
-    <name>hadoop.registry.kerberos.realm</name>
-    <value></value>
-  </property>
-
-  <property>
-    <description>
-      Key to define the JAAS context. Used in secure
-      mode
-    </description>
-    <name>hadoop.registry.jaas.context</name>
-    <value>Client</value>
-  </property>
-
-
-  <property>
-    <description>
-      Zookeeper session timeout in milliseconds
-    </description>
-    <name>hadoop.registry.zk.session.timeout.ms</name>
-    <value>60000</value>
-  </property>
-
-  <property>
-    <description>
-      Zookeeper session timeout in milliseconds
-    </description>
-    <name>hadoop.registry.zk.connection.timeout.ms</name>
-    <value>15000</value>
-  </property>
-
-  <property>
-    <description>
-      Zookeeper connection retry count before failing
-    </description>
-    <name>hadoop.registry.zk.retry.times</name>
-    <value>5</value>
-  </property>
-
-  <property>
-    <description>
-    </description>
-    <name>hadoop.registry.zk.retry.interval.ms</name>
-    <value>1000</value>
-  </property>
-
-  <property>
-    <description>
-      Zookeeper retry limit in milliseconds, during
-      exponential backoff: {@value}
-
-      This places a limit even
-      if the retry times and interval limit, combined
-      with the backoff policy, result in a long retry
-      period
-    </description>
-    <name>hadoop.registry.zk.retry.ceiling.ms</name>
-    <value>60000</value>
-  </property>
-```


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to