http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterHeartbeatService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterHeartbeatService.java
deleted file mode 100644
index 427d514..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterHeartbeatService.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.router;
-
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.RetryNTimes;
-import org.apache.curator.test.TestingServer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
-import org.apache.hadoop.hdfs.server.federation.store.RouterStore;
-import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
-import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationResponse;
-import org.apache.hadoop.hdfs.server.federation.store.records.RouterState;
-import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.waitStateStore;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test cases for router heartbeat service.
- */
-public class TestRouterHeartbeatService {
-  private Router router;
-  private final String routerId = "router1";
-  private TestingServer testingServer;
-  private CuratorFramework curatorFramework;
-
-  @Before
-  public void setup() throws Exception {
-    router = new Router();
-    router.setRouterId(routerId);
-    Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE_MS, 1);
-    Configuration routerConfig =
-        new RouterConfigBuilder(conf).stateStore().build();
-    routerConfig.setLong(DFSConfigKeys.FEDERATION_STORE_CONNECTION_TEST_MS,
-        TimeUnit.HOURS.toMillis(1));
-    routerConfig.setClass(DFSConfigKeys.FEDERATION_STORE_DRIVER_CLASS,
-        StateStoreZooKeeperImpl.class, StateStoreDriver.class);
-
-    testingServer = new TestingServer();
-    String connectStr = testingServer.getConnectString();
-    curatorFramework = CuratorFrameworkFactory.builder()
-        .connectString(connectStr)
-        .retryPolicy(new RetryNTimes(100, 100))
-        .build();
-    curatorFramework.start();
-    routerConfig.set(CommonConfigurationKeys.ZK_ADDRESS, connectStr);
-    router.init(routerConfig);
-    router.start();
-
-
-    waitStateStore(router.getStateStore(), TimeUnit.SECONDS.toMicros(10));
-  }
-
-  @Test
-  public void testStateStoreUnavailable() throws IOException {
-    curatorFramework.close();
-    testingServer.stop();
-    router.getStateStore().stop();
-    // The driver is not ready
-    assertFalse(router.getStateStore().isDriverReady());
-
-    // Do a heartbeat; no exception should be thrown
-    RouterHeartbeatService heartbeatService =
-        new RouterHeartbeatService(router);
-    heartbeatService.updateStateStore();
-  }
-
-  @Test
-  public void testStateStoreAvailable() throws Exception {
-    // The driver is ready
-    StateStoreService stateStore = router.getStateStore();
-    assertTrue(router.getStateStore().isDriverReady());
-    RouterStore routerStore = router.getRouterStateManager();
-
-    // No record about this router
-    stateStore.refreshCaches(true);
-    GetRouterRegistrationRequest request =
-        GetRouterRegistrationRequest.newInstance(routerId);
-    GetRouterRegistrationResponse response =
-        router.getRouterStateManager().getRouterRegistration(request);
-    RouterState routerState = response.getRouter();
-    String id = routerState.getRouterId();
-    StateStoreVersion version = routerState.getStateStoreVersion();
-    assertNull(id);
-    assertNull(version);
-
-    // Do a heartbeat
-    RouterHeartbeatService heartbeatService =
-        new RouterHeartbeatService(router);
-    heartbeatService.updateStateStore();
-
-    // We should have a record
-    stateStore.refreshCaches(true);
-    request = GetRouterRegistrationRequest.newInstance(routerId);
-    response = routerStore.getRouterRegistration(request);
-    routerState = response.getRouter();
-    id = routerState.getRouterId();
-    version = routerState.getStateStoreVersion();
-    assertNotNull(id);
-    assertNotNull(version);
-  }
-
-  @After
-  public void tearDown() throws IOException {
-    if (curatorFramework != null) {
-      curatorFramework.close();
-    }
-    if (testingServer != null) {
-      testingServer.stop();
-    }
-    if (router != null) {
-      router.shutDown();
-    }
-  }
-}
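
Note: the ZooKeeper-backed State Store wiring this test depended on reduces to a few calls. A minimal sketch, using only classes and configuration keys imported in the file above; the class name ZkStateStoreSketch is made up for illustration, and it lives in the router package so the package-private test hooks (setRouterId, updateStateStore) resolve:

package org.apache.hadoop.hdfs.server.federation.router;

import org.apache.curator.test.TestingServer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;

public class ZkStateStoreSketch {
  public static void main(String[] args) throws Exception {
    // In-process ZooKeeper, as used by the test above.
    TestingServer zk = new TestingServer();

    // Enable the State Store and select the ZooKeeper driver.
    Configuration conf = new RouterConfigBuilder(new Configuration())
        .stateStore().build();
    conf.setClass(DFSConfigKeys.FEDERATION_STORE_DRIVER_CLASS,
        StateStoreZooKeeperImpl.class, StateStoreDriver.class);
    conf.set(CommonConfigurationKeys.ZK_ADDRESS, zk.getConnectString());

    // Start a Router against it; a RouterHeartbeatService can then
    // publish this Router's record with updateStateStore().
    Router router = new Router();
    router.setRouterId("router1");
    router.init(conf);
    router.start();
    new RouterHeartbeatService(router).updateStateStore();

    router.shutDown();
    zk.stop();
  }
}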

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
deleted file mode 100644
index 8702b3c..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.router;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.Collections;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
-import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.NamenodeContext;
-import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
-import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
-import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
-import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
-import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Test a router end-to-end including the MountTable.
- */
-public class TestRouterMountTable {
-
-  private static StateStoreDFSCluster cluster;
-  private static NamenodeContext nnContext;
-  private static RouterContext routerContext;
-  private static MountTableResolver mountTable;
-
-  @BeforeClass
-  public static void globalSetUp() throws Exception {
-
-    // Build and start a federated cluster
-    cluster = new StateStoreDFSCluster(false, 1);
-    Configuration conf = new RouterConfigBuilder()
-        .stateStore()
-        .admin()
-        .rpc()
-        .build();
-    cluster.addRouterOverrides(conf);
-    cluster.startCluster();
-    cluster.startRouters();
-    cluster.waitClusterUp();
-
-    // Get the end points
-    nnContext = cluster.getRandomNamenode();
-    routerContext = cluster.getRandomRouter();
-    Router router = routerContext.getRouter();
-    mountTable = (MountTableResolver) router.getSubclusterResolver();
-  }
-
-  @AfterClass
-  public static void tearDown() {
-    if (cluster != null) {
-      cluster.stopRouter(routerContext);
-      cluster.shutdown();
-      cluster = null;
-    }
-  }
-
-  @Test
-  public void testReadOnly() throws Exception {
-
-    // Add a read only entry
-    MountTable readOnlyEntry = MountTable.newInstance(
-        "/readonly", Collections.singletonMap("ns0", "/testdir"));
-    readOnlyEntry.setReadOnly(true);
-    assertTrue(addMountTable(readOnlyEntry));
-
-    // Add a regular entry
-    MountTable regularEntry = MountTable.newInstance(
-        "/regular", Collections.singletonMap("ns0", "/testdir"));
-    assertTrue(addMountTable(regularEntry));
-
-    // Create a folder which should show in all locations
-    final FileSystem nnFs = nnContext.getFileSystem();
-    final FileSystem routerFs = routerContext.getFileSystem();
-    assertTrue(routerFs.mkdirs(new Path("/regular/newdir")));
-
-    FileStatus dirStatusNn =
-        nnFs.getFileStatus(new Path("/testdir/newdir"));
-    assertTrue(dirStatusNn.isDirectory());
-    FileStatus dirStatusRegular =
-        routerFs.getFileStatus(new Path("/regular/newdir"));
-    assertTrue(dirStatusRegular.isDirectory());
-    FileStatus dirStatusReadOnly =
-        routerFs.getFileStatus(new Path("/readonly/newdir"));
-    assertTrue(dirStatusReadOnly.isDirectory());
-
-    // It should fail writing into a read only path
-    try {
-      routerFs.mkdirs(new Path("/readonly/newdirfail"));
-      fail("We should not be able to write into a read only mount point");
-    } catch (IOException ioe) {
-      String msg = ioe.getMessage();
-      assertTrue(msg.startsWith(
-          "/readonly/newdirfail is in a read only mount point"));
-    }
-  }
-
-  /**
-   * Add a mount table entry to the mount table through the admin API.
-   * @param entry Mount table entry to add.
-   * @return If it was successfully added.
-   * @throws IOException Problems adding entries.
-   */
-  private boolean addMountTable(final MountTable entry) throws IOException {
-    RouterClient client = routerContext.getAdminClient();
-    MountTableManager mountTableManager = client.getMountTableManager();
-    AddMountTableEntryRequest addRequest =
-        AddMountTableEntryRequest.newInstance(entry);
-    AddMountTableEntryResponse addResponse =
-        mountTableManager.addMountTableEntry(addRequest);
-
-    // Reload the Router cache
-    mountTable.loadCache(true);
-
-    return addResponse.getStatus();
-  }
-}
\ No newline at end of file
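
Note: the admin-API pattern exercised above, build a MountTable record, submit it, then force the resolver to reload, for reference. A minimal sketch of a helper in the same package; the class and method names are made up, the calls are the ones addMountTable() used:

package org.apache.hadoop.hdfs.server.federation.router;

import java.io.IOException;
import java.util.Collections;
import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;

public final class MountTableSketch {
  /** Register a read-only mount point /readonly -> ns0:/testdir. */
  public static boolean addReadOnlyMount(RouterClient client,
      MountTableResolver resolver) throws IOException {
    MountTable entry = MountTable.newInstance(
        "/readonly", Collections.singletonMap("ns0", "/testdir"));
    entry.setReadOnly(true);
    MountTableManager manager = client.getMountTableManager();
    boolean added = manager.addMountTableEntry(
        AddMountTableEntryRequest.newInstance(entry)).getStatus();
    // Entries are cached by the resolver; reload so the Router sees
    // the new mount point immediately.
    resolver.loadCache(true);
    return added;
  }

  private MountTableSketch() {}
}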

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
deleted file mode 100644
index 3d58146..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.router;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_MONITOR_NAMENODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
-import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.NamenodeContext;
-import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
-import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
-import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
-import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
-import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Test namenodes monitor behavior in the Router.
- */
-public class TestRouterNamenodeMonitoring {
-
-  private static StateStoreDFSCluster cluster;
-  private static RouterContext routerContext;
-  private static MembershipNamenodeResolver resolver;
-
-  private String ns0;
-  private String ns1;
-  private long initializedTime;
-
-  @Before
-  public void setUp() throws Exception {
-    // Build and start a federated cluster with HA enabled
-    cluster = new StateStoreDFSCluster(true, 2);
-    // Enable heartbeat service and local heartbeat
-    Configuration routerConf = new RouterConfigBuilder()
-        .stateStore()
-        .admin()
-        .rpc()
-        .enableLocalHeartbeat(true)
-        .heartbeat()
-        .build();
-
-    // Specify local node (ns0.nn1) to monitor
-    StringBuilder sb = new StringBuilder();
-    ns0 = cluster.getNameservices().get(0);
-    NamenodeContext context = cluster.getNamenodes(ns0).get(1);
-    routerConf.set(DFS_NAMESERVICE_ID, ns0);
-    routerConf.set(DFS_HA_NAMENODE_ID_KEY, context.getNamenodeId());
-
-    // Specify namenodes (ns1.nn0,ns1.nn1) to monitor
-    sb = new StringBuilder();
-    ns1 = cluster.getNameservices().get(1);
-    for (NamenodeContext ctx : cluster.getNamenodes(ns1)) {
-      String suffix = ctx.getConfSuffix();
-      if (sb.length() != 0) {
-        sb.append(",");
-      }
-      sb.append(suffix);
-    }
-    // override with the namenodes: ns1.nn0,ns1.nn1
-    routerConf.set(DFS_ROUTER_MONITOR_NAMENODE, sb.toString());
-
-    cluster.addRouterOverrides(routerConf);
-    cluster.startCluster();
-    cluster.startRouters();
-    cluster.waitClusterUp();
-
-    routerContext = cluster.getRandomRouter();
-    resolver = (MembershipNamenodeResolver) routerContext.getRouter()
-        .getNamenodeResolver();
-    initializedTime = Time.now();
-  }
-
-  @After
-  public void tearDown() {
-    if (cluster != null) {
-      cluster.stopRouter(routerContext);
-      cluster.shutdown();
-      cluster = null;
-    }
-  }
-
-  @Test
-  public void testNamenodeMonitoring() throws Exception {
-    // Set nn0 to active for all nameservices
-    for (String ns : cluster.getNameservices()) {
-      cluster.switchToActive(ns, "nn0");
-      cluster.switchToStandby(ns, "nn1");
-    }
-
-    Collection<NamenodeHeartbeatService> heartbeatServices = routerContext
-        .getRouter().getNamenodeHearbeatServices();
-    // manually trigger the heartbeat
-    for (NamenodeHeartbeatService service : heartbeatServices) {
-      service.periodicInvoke();
-    }
-
-    resolver.loadCache(true);
-    List<? extends FederationNamenodeContext> namespaceInfo0 =
-        resolver.getNamenodesForNameserviceId(ns0);
-    List<? extends FederationNamenodeContext> namespaceInfo1 =
-        resolver.getNamenodesForNameserviceId(ns1);
-
-    // The modified date won't be updated in ns0.nn0 since it isn't
-    // monitored by the Router.
-    assertEquals("nn0", namespaceInfo0.get(1).getNamenodeId());
-    assertTrue(namespaceInfo0.get(1).getDateModified() < initializedTime);
-
-    // other namenodes should be updated as expected
-    assertEquals("nn1", namespaceInfo0.get(0).getNamenodeId());
-    assertTrue(namespaceInfo0.get(0).getDateModified() > initializedTime);
-
-    assertEquals("nn0", namespaceInfo1.get(0).getNamenodeId());
-    assertTrue(namespaceInfo1.get(0).getDateModified() > initializedTime);
-
-    assertEquals("nn1", namespaceInfo1.get(1).getNamenodeId());
-    assertTrue(namespaceInfo1.get(1).getDateModified() > initializedTime);
-  }
-}
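
Note: the monitoring setup above comes down to three configuration keys. A minimal sketch using only the DFSConfigKeys constants statically imported in the file; the nameservice and namenode IDs are illustrative:

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_MONITOR_NAMENODE;

import org.apache.hadoop.conf.Configuration;

public class MonitorConfSketch {
  public static Configuration build() {
    Configuration routerConf = new Configuration();
    // Local namenode, covered by the Router's local heartbeat service.
    routerConf.set(DFS_NAMESERVICE_ID, "ns0");
    routerConf.set(DFS_HA_NAMENODE_ID_KEY, "nn1");
    // Remote namenodes to heartbeat, as <nameservice>.<namenode> pairs.
    routerConf.set(DFS_ROUTER_MONITOR_NAMENODE, "ns1.nn0,ns1.nn1");
    return routerConf;
  }
}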

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
deleted file mode 100644
index 2b40bbd..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
+++ /dev/null
@@ -1,452 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.router;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.UUID;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.QuotaUsage;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
-import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.NamenodeContext;
-import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
-import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
-import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
-import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
-import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
-import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.common.base.Supplier;
-
-/**
- * Tests quota behaviors in Router-based Federation.
- */
-public class TestRouterQuota {
-  private static StateStoreDFSCluster cluster;
-  private static NamenodeContext nnContext1;
-  private static NamenodeContext nnContext2;
-  private static RouterContext routerContext;
-  private static MountTableResolver resolver;
-
-  private static final int BLOCK_SIZE = 512;
-
-  @Before
-  public void setUp() throws Exception {
-
-    // Build and start a federated cluster
-    cluster = new StateStoreDFSCluster(false, 2);
-    Configuration routerConf = new RouterConfigBuilder()
-        .stateStore()
-        .admin()
-        .quota()
-        .rpc()
-        .build();
-    routerConf.set(DFSConfigKeys.DFS_ROUTER_QUOTA_CACHE_UPATE_INTERVAL, "2s");
-
-    // override some HDFS settings used when testing the space quota
-    Configuration hdfsConf = new Configuration(false);
-    hdfsConf.setInt(HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    hdfsConf.setInt(HdfsClientConfigKeys.DFS_REPLICATION_KEY, 1);
-
-    cluster.addRouterOverrides(routerConf);
-    cluster.addNamenodeOverrides(hdfsConf);
-    cluster.startCluster();
-    cluster.startRouters();
-    cluster.waitClusterUp();
-
-    nnContext1 = cluster.getNamenode(cluster.getNameservices().get(0), null);
-    nnContext2 = cluster.getNamenode(cluster.getNameservices().get(1), null);
-    routerContext = cluster.getRandomRouter();
-    Router router = routerContext.getRouter();
-    resolver = (MountTableResolver) router.getSubclusterResolver();
-  }
-
-  @After
-  public void tearDown() {
-    if (cluster != null) {
-      cluster.stopRouter(routerContext);
-      cluster.shutdown();
-      cluster = null;
-    }
-  }
-
-  @Test
-  public void testNamespaceQuotaExceed() throws Exception {
-    long nsQuota = 3;
-    final FileSystem nnFs1 = nnContext1.getFileSystem();
-    final FileSystem nnFs2 = nnContext2.getFileSystem();
-
-    // Add two mount tables:
-    // /nsquota --> ns0---testdir1
-    // /nsquota/subdir --> ns1---testdir2
-    nnFs1.mkdirs(new Path("/testdir1"));
-    nnFs2.mkdirs(new Path("/testdir2"));
-    MountTable mountTable1 = MountTable.newInstance("/nsquota",
-        Collections.singletonMap("ns0", "/testdir1"));
-
-    mountTable1.setQuota(new RouterQuotaUsage.Builder().quota(nsQuota).build());
-    addMountTable(mountTable1);
-
-    MountTable mountTable2 = MountTable.newInstance("/nsquota/subdir",
-        Collections.singletonMap("ns1", "/testdir2"));
-    mountTable2.setQuota(new RouterQuotaUsage.Builder().quota(nsQuota).build());
-    addMountTable(mountTable2);
-
-    final FileSystem routerFs = routerContext.getFileSystem();
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-
-      @Override
-      public Boolean get() {
-        boolean isNsQuotaViolated = false;
-        try {
-          // create new directory to trigger NSQuotaExceededException
-          routerFs.mkdirs(new Path("/nsquota/" + UUID.randomUUID()));
-          routerFs.mkdirs(new Path("/nsquota/subdir/" + UUID.randomUUID()));
-        } catch (NSQuotaExceededException e) {
-          isNsQuotaViolated = true;
-        } catch (IOException ignored) {
-        }
-        return isNsQuotaViolated;
-      }
-    }, 5000, 60000);
-    // mkdir in real FileSystem should be okay
-    nnFs1.mkdirs(new Path("/testdir1/" + UUID.randomUUID()));
-    nnFs2.mkdirs(new Path("/testdir2/" + UUID.randomUUID()));
-  }
-
-  @Test
-  public void testStorageSpaceQuotaaExceed() throws Exception {
-    long ssQuota = 3071;
-    final FileSystem nnFs1 = nnContext1.getFileSystem();
-    final FileSystem nnFs2 = nnContext2.getFileSystem();
-
-    // Add two mount tables:
-    // /ssquota --> ns0---testdir3
-    // /ssquota/subdir --> ns1---testdir4
-    nnFs1.mkdirs(new Path("/testdir3"));
-    nnFs2.mkdirs(new Path("/testdir4"));
-    MountTable mountTable1 = MountTable.newInstance("/ssquota",
-        Collections.singletonMap("ns0", "/testdir3"));
-
-    mountTable1
-        .setQuota(new RouterQuotaUsage.Builder().spaceQuota(ssQuota).build());
-    addMountTable(mountTable1);
-
-    MountTable mountTable2 = MountTable.newInstance("/ssquota/subdir",
-        Collections.singletonMap("ns1", "/testdir4"));
-    mountTable2
-        .setQuota(new RouterQuotaUsage.Builder().spaceQuota(ssQuota).build());
-    addMountTable(mountTable2);
-
-    final DFSClient routerClient = routerContext.getClient();
-    routerClient.create("/ssquota/file", true).close();
-    routerClient.create("/ssquota/subdir/file", true).close();
-
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-
-      @Override
-      public Boolean get() {
-        boolean isDsQuotaViolated = false;
-        try {
-          // append data to trigger DSQuotaExceededException
-          appendData("/ssquota/file", routerClient, BLOCK_SIZE);
-          appendData("/ssquota/subdir/file", routerClient, BLOCK_SIZE);
-        } catch (DSQuotaExceededException e) {
-          isDsQuotaViolated = true;
-        } catch (IOException ignored) {
-        }
-        return isDsQuotaViolated;
-      }
-    }, 5000, 60000);
-
-    // appending data to the destination path in the real FileSystem should be okay
-    appendData("/testdir3/file", nnContext1.getClient(), BLOCK_SIZE);
-    appendData("/testdir4/file", nnContext2.getClient(), BLOCK_SIZE);
-  }
-
-  /**
-   * Add a mount table entry to the mount table through the admin API.
-   * @param entry Mount table entry to add.
-   * @return If it was successfully added.
-   * @throws IOException Problems adding entries.
-   */
-  private boolean addMountTable(final MountTable entry) throws IOException {
-    RouterClient client = routerContext.getAdminClient();
-    MountTableManager mountTableManager = client.getMountTableManager();
-    AddMountTableEntryRequest addRequest =
-        AddMountTableEntryRequest.newInstance(entry);
-    AddMountTableEntryResponse addResponse =
-        mountTableManager.addMountTableEntry(addRequest);
-
-    // Reload the Router cache
-    resolver.loadCache(true);
-
-    return addResponse.getStatus();
-  }
-
-  /**
-   * Append data to the specified file.
-   * @param path Path of the file.
-   * @param client DFS Client.
-   * @param dataLen The length of the data to write.
-   * @throws IOException
-   */
-  private void appendData(String path, DFSClient client, int dataLen)
-      throws IOException {
-    EnumSet<CreateFlag> createFlag = EnumSet.of(CreateFlag.APPEND);
-    HdfsDataOutputStream stream = client.append(path, 1024, createFlag, null,
-        null);
-    byte[] data = new byte[dataLen];
-    stream.write(data);
-    stream.close();
-  }
-
-  @Test
-  public void testSetQuota() throws Exception {
-    long nsQuota = 5;
-    long ssQuota = 100;
-    final FileSystem nnFs1 = nnContext1.getFileSystem();
-    final FileSystem nnFs2 = nnContext2.getFileSystem();
-
-    // Add two mount tables:
-    // /setquota --> ns0---testdir5
-    // /setquota/subdir --> ns1---testdir6
-    nnFs1.mkdirs(new Path("/testdir5"));
-    nnFs2.mkdirs(new Path("/testdir6"));
-    MountTable mountTable1 = MountTable.newInstance("/setquota",
-        Collections.singletonMap("ns0", "/testdir5"));
-    mountTable1
-        .setQuota(new RouterQuotaUsage.Builder().quota(nsQuota)
-        .spaceQuota(ssQuota).build());
-    addMountTable(mountTable1);
-
-    // don't set quota for subpath of mount table
-    MountTable mountTable2 = MountTable.newInstance("/setquota/subdir",
-        Collections.singletonMap("ns1", "/testdir6"));
-    addMountTable(mountTable2);
-
-    RouterQuotaUpdateService updateService = routerContext.getRouter()
-        .getQuotaCacheUpdateService();
-    // ensure setQuota RPC call was invoked
-    updateService.periodicInvoke();
-
-    ClientProtocol client1 = nnContext1.getClient().getNamenode();
-    ClientProtocol client2 = nnContext2.getClient().getNamenode();
-    final QuotaUsage quota1 = client1.getQuotaUsage("/testdir5");
-    final QuotaUsage quota2 = client2.getQuotaUsage("/testdir6");
-
-    assertEquals(nsQuota, quota1.getQuota());
-    assertEquals(ssQuota, quota1.getSpaceQuota());
-    assertEquals(nsQuota, quota2.getQuota());
-    assertEquals(ssQuota, quota2.getSpaceQuota());
-  }
-
-  @Test
-  public void testGetQuota() throws Exception {
-    long nsQuota = 10;
-    long ssQuota = 100;
-    final FileSystem nnFs1 = nnContext1.getFileSystem();
-    final FileSystem nnFs2 = nnContext2.getFileSystem();
-
-    // Add two mount tables:
-    // /getquota --> ns0---/testdir7
-    // /getquota/subdir1 --> ns0---/testdir7/subdir
-    // /getquota/subdir2 --> ns1---/testdir8
-    nnFs1.mkdirs(new Path("/testdir7"));
-    nnFs1.mkdirs(new Path("/testdir7/subdir"));
-    nnFs2.mkdirs(new Path("/testdir8"));
-    MountTable mountTable1 = MountTable.newInstance("/getquota",
-        Collections.singletonMap("ns0", "/testdir7"));
-    mountTable1
-        .setQuota(new RouterQuotaUsage.Builder().quota(nsQuota)
-        .spaceQuota(ssQuota).build());
-    addMountTable(mountTable1);
-
-    MountTable mountTable2 = MountTable.newInstance("/getquota/subdir1",
-        Collections.singletonMap("ns0", "/testdir7/subdir"));
-    addMountTable(mountTable2);
-
-    MountTable mountTable3 = MountTable.newInstance("/getquota/subdir2",
-        Collections.singletonMap("ns1", "/testdir8"));
-    addMountTable(mountTable3);
-
-    // use router client to create new files
-    DFSClient routerClient = routerContext.getClient();
-    routerClient.create("/getquota/file", true).close();
-    routerClient.create("/getquota/subdir1/file", true).close();
-    routerClient.create("/getquota/subdir2/file", true).close();
-
-    ClientProtocol clientProtocol = routerContext.getClient().getNamenode();
-    RouterQuotaUpdateService updateService = routerContext.getRouter()
-        .getQuotaCacheUpdateService();
-    updateService.periodicInvoke();
-    final QuotaUsage quota = clientProtocol.getQuotaUsage("/getquota");
-    // the quota should be aggregated
-    assertEquals(6, quota.getFileAndDirectoryCount());
-  }
-
-  @Test
-  public void testStaleQuotaRemoving() throws Exception {
-    long nsQuota = 20;
-    long ssQuota = 200;
-    String stalePath = "/stalequota";
-    final FileSystem nnFs1 = nnContext1.getFileSystem();
-
-    // Add one mount table:
-    // /stalequota --> ns0---/testdir9
-    nnFs1.mkdirs(new Path("/testdir9"));
-    MountTable mountTable = MountTable.newInstance(stalePath,
-        Collections.singletonMap("ns0", "/testdir9"));
-    mountTable.setQuota(new RouterQuotaUsage.Builder().quota(nsQuota)
-        .spaceQuota(ssQuota).build());
-    addMountTable(mountTable);
-
-    // Call periodicInvoke to ensure quota for stalePath was
-    // loaded into quota manager.
-    RouterQuotaUpdateService updateService = routerContext.getRouter()
-        .getQuotaCacheUpdateService();
-    updateService.periodicInvoke();
-
-    // use quota manager to get its quota usage and do verification
-    RouterQuotaManager quotaManager = routerContext.getRouter()
-        .getQuotaManager();
-    RouterQuotaUsage quota = quotaManager.getQuotaUsage(stalePath);
-    assertEquals(nsQuota, quota.getQuota());
-    assertEquals(ssQuota, quota.getSpaceQuota());
-
-    // remove stale path entry
-    removeMountTable(stalePath);
-    updateService.periodicInvoke();
-    // the stale entry should be removed and we will get null
-    quota = quotaManager.getQuotaUsage(stalePath);
-    assertNull(quota);
-  }
-
-  /**
-   * Remove a mount table entry from the mount table through the admin API.
-   * @param path Mount point of the entry to remove.
-   * @return If it was successfully removed.
-   * @throws IOException Problems removing entries.
-   */
-  private boolean removeMountTable(String path) throws IOException {
-    RouterClient client = routerContext.getAdminClient();
-    MountTableManager mountTableManager = client.getMountTableManager();
-    RemoveMountTableEntryRequest removeRequest = RemoveMountTableEntryRequest
-        .newInstance(path);
-    RemoveMountTableEntryResponse removeResponse = mountTableManager
-        .removeMountTableEntry(removeRequest);
-
-    // Reload the Router cache
-    resolver.loadCache(true);
-    return removeResponse.getStatus();
-  }
-
-  @Test
-  public void testQuotaUpdating() throws Exception {
-    long nsQuota = 30;
-    long ssQuota = 1024;
-    String path = "/updatequota";
-    final FileSystem nnFs1 = nnContext1.getFileSystem();
-
-    // Add one mount table:
-    // /updatequota --> ns0---/testdir10
-    nnFs1.mkdirs(new Path("/testdir10"));
-    MountTable mountTable = MountTable.newInstance(path,
-        Collections.singletonMap("ns0", "/testdir10"));
-    mountTable.setQuota(new RouterQuotaUsage.Builder().quota(nsQuota)
-        .spaceQuota(ssQuota).build());
-    addMountTable(mountTable);
-
-    // Call periodicInvoke to ensure the quota is updated in the quota manager
-    // and state store.
-    RouterQuotaUpdateService updateService = routerContext.getRouter()
-        .getQuotaCacheUpdateService();
-    updateService.periodicInvoke();
-
-    // verify initial quota value
-    List<MountTable> results = getMountTable(path);
-    MountTable updatedMountTable = !results.isEmpty() ? results.get(0) : null;
-    RouterQuotaUsage quota = updatedMountTable.getQuota();
-    assertEquals(nsQuota, quota.getQuota());
-    assertEquals(ssQuota, quota.getSpaceQuota());
-    assertEquals(1, quota.getFileAndDirectoryCount());
-    assertEquals(0, quota.getSpaceConsumed());
-
-    // mkdir and write a new file
-    final FileSystem routerFs = routerContext.getFileSystem();
-    routerFs.mkdirs(new Path(path + "/" + UUID.randomUUID()));
-    DFSClient routerClient = routerContext.getClient();
-    routerClient.create(path + "/file", true).close();
-    appendData(path + "/file", routerClient, BLOCK_SIZE);
-
-    updateService.periodicInvoke();
-    results = getMountTable(path);
-    updatedMountTable = !results.isEmpty() ? results.get(0) : null;
-    quota = updatedMountTable.getQuota();
-
-    // verify if quota has been updated in state store
-    assertEquals(nsQuota, quota.getQuota());
-    assertEquals(ssQuota, quota.getSpaceQuota());
-    assertEquals(3, quota.getFileAndDirectoryCount());
-    assertEquals(BLOCK_SIZE, quota.getSpaceConsumed());
-  }
-
-  /**
-   * Get the mount table entries for the specified path through the admin API.
-   * @param path Mount point of the entries to get.
-   * @return The matching mount table entries.
-   * @throws IOException Problems getting entries.
-   */
-  private List<MountTable> getMountTable(String path) throws IOException {
-    // Reload the Router cache
-    resolver.loadCache(true);
-    RouterClient client = routerContext.getAdminClient();
-    MountTableManager mountTableManager = client.getMountTableManager();
-    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
-        .newInstance(path);
-    GetMountTableEntriesResponse removeResponse = mountTableManager
-        .getMountTableEntries(getRequest);
-
-    return removeResponse.getEntries();
-  }
-}
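
Note: the quota round trip at the core of these tests, condensed. A minimal sketch in the same package; the class and method names are made up, the entry is assumed to be registered through the admin API as in addMountTable(), and the update service and quota manager come from a running Router exactly as in the tests:

package org.apache.hadoop.hdfs.server.federation.router;

import java.util.Collections;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;

public final class QuotaSketch {
  /** Attach quotas to a mount entry and read the tracked usage back. */
  public static RouterQuotaUsage roundTrip(Router router) throws Exception {
    MountTable entry = MountTable.newInstance(
        "/setquota", Collections.singletonMap("ns0", "/testdir5"));
    entry.setQuota(new RouterQuotaUsage.Builder()
        .quota(5)           // namespace quota: files + directories
        .spaceQuota(100)    // storage space quota, in bytes
        .build());
    // ... register the entry through the admin API as in addMountTable() ...

    // The update service pushes quotas down via setQuota RPCs and pulls
    // usage back; tests call it directly instead of waiting an interval.
    RouterQuotaUpdateService updateService =
        router.getQuotaCacheUpdateService();
    updateService.periodicInvoke();
    return router.getQuotaManager().getQuotaUsage("/setquota");
  }

  private QuotaSketch() {}
}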

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java
deleted file mode 100644
index ce3ee17..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.router;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Set;
-
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Tests for class {@link RouterQuotaManager}.
- */
-public class TestRouterQuotaManager {
-  private static RouterQuotaManager manager;
-
-  @Before
-  public void setup() {
-    manager = new RouterQuotaManager();
-  }
-
-  @After
-  public void cleanup() {
-    manager.clear();
-  }
-
-  @Test
-  public void testGetChildrenPaths() {
-    RouterQuotaUsage quotaUsage = new RouterQuotaUsage.Builder().build();
-    manager.put("/path1", quotaUsage);
-    manager.put("/path2", quotaUsage);
-    manager.put("/path1/subdir", quotaUsage);
-    manager.put("/path1/subdir/subdir", quotaUsage);
-
-    Set<String> childrenPaths = manager.getPaths("/path1");
-    assertEquals(3, childrenPaths.size());
-    assertTrue(childrenPaths.contains("/path1/subdir")
-        && childrenPaths.contains("/path1/subdir/subdir")
-        && childrenPaths.contains("/path1"));
-
-    // test for corner case
-    manager.put("/path3", quotaUsage);
-    manager.put("/path3/subdir", quotaUsage);
-    manager.put("/path3-subdir", quotaUsage);
-
-    childrenPaths = manager.getPaths("/path3");
-    assertEquals(2, childrenPaths.size());
-    // path /path3-subdir should not be returned
-    assertTrue(childrenPaths.contains("/path3")
-        && childrenPaths.contains("/path3/subdir")
-        && !childrenPaths.contains("/path3-subdir"));
-  }
-
-  @Test
-  public void testGetQuotaUsage() {
-    RouterQuotaUsage quotaGet;
-
-    // test case 1: get quota for a nonexistent path
-    quotaGet = manager.getQuotaUsage("/non-exist-path");
-    assertNull(quotaGet);
-
-    // test case 2: get quota from a path with no quota set
-    RouterQuotaUsage.Builder quota = new RouterQuotaUsage.Builder()
-        .quota(HdfsConstants.QUOTA_DONT_SET)
-        .spaceQuota(HdfsConstants.QUOTA_DONT_SET);
-    manager.put("/noQuotaSet", quota.build());
-    quotaGet = manager.getQuotaUsage("/noQuotaSet");
-    // it should return null
-    assertNull(quotaGet);
-
-    // test case 3: get quota from a path with a quota set
-    quota.quota(1);
-    quota.spaceQuota(HdfsConstants.QUOTA_DONT_SET);
-    manager.put("/hasQuotaSet", quota.build());
-    quotaGet = manager.getQuotaUsage("/hasQuotaSet");
-    assertEquals(1, quotaGet.getQuota());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaGet.getSpaceQuota());
-
-    // test case 4: get quota for a nonexistent child path
-    quotaGet = manager.getQuotaUsage("/hasQuotaSet/file");
-    // it will return the nearest ancestor whose quota was set
-    assertEquals(1, quotaGet.getQuota());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaGet.getSpaceQuota());
-
-    // test case 5: get quota for a child path whose parent
-    // has no quota set
-    quota.quota(HdfsConstants.QUOTA_DONT_SET);
-    quota.spaceQuota(HdfsConstants.QUOTA_DONT_SET);
-    manager.put("/hasQuotaSet/noQuotaSet", quota.build());
-    // this should return the quota of path /hasQuotaSet
-    // (the nearest ancestor whose quota was set)
-    quotaGet = manager.getQuotaUsage("/hasQuotaSet/noQuotaSet/file");
-    assertEquals(1, quotaGet.getQuota());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaGet.getSpaceQuota());
-
-    // test case 6: get quota for a child path whose parent has a quota set
-    quota.quota(2);
-    quota.spaceQuota(HdfsConstants.QUOTA_DONT_SET);
-    manager.put("/hasQuotaSet/hasQuotaSet", quota.build());
-    // this should return the quota of path /hasQuotaSet/hasQuotaSet
-    quotaGet = manager.getQuotaUsage("/hasQuotaSet/hasQuotaSet/file");
-    assertEquals(2, quotaGet.getQuota());
-    assertEquals(HdfsConstants.QUOTA_DONT_SET, quotaGet.getSpaceQuota());
-  }
-}
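
Note: the path semantics these tests pin down, in brief. A minimal sketch in the same package (class and variable names illustrative, the calls are the ones tested above):

package org.apache.hadoop.hdfs.server.federation.router;

import java.util.Set;

public final class QuotaManagerSketch {
  public static void demo() {
    RouterQuotaManager manager = new RouterQuotaManager();
    RouterQuotaUsage quota = new RouterQuotaUsage.Builder().quota(1).build();
    manager.put("/path1", quota);
    manager.put("/path1/subdir", quota);
    manager.put("/path1-sibling", quota);

    // getPaths() matches whole path components, not string prefixes:
    // "/path1-sibling" is not a child of "/path1".
    Set<String> children = manager.getPaths("/path1");  // size 2

    // getQuotaUsage() walks up to the nearest ancestor with a real
    // quota; entries holding only QUOTA_DONT_SET count as unset.
    RouterQuotaUsage effective = manager.getQuotaUsage("/path1/subdir/file");
  }

  private QuotaManagerSketch() {}
}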

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
deleted file mode 100644
index 61e7657..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.router;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
-import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.NamenodeContext;
-import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
-import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
-import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCMetrics;
-import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
-import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
-import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Test retry behavior of the Router RPC Client.
- */
-public class TestRouterRPCClientRetries {
-
-  private static StateStoreDFSCluster cluster;
-  private static NamenodeContext nnContext1;
-  private static RouterContext routerContext;
-  private static MembershipNamenodeResolver resolver;
-  private static ClientProtocol routerProtocol;
-
-  @Before
-  public void setUp() throws Exception {
-    // Build and start a federated cluster
-    cluster = new StateStoreDFSCluster(false, 2);
-    Configuration routerConf = new RouterConfigBuilder()
-        .stateStore()
-        .admin()
-        .rpc()
-        .build();
-
-    // reduce IPC client connection retry times and interval time
-    Configuration clientConf = new Configuration(false);
-    clientConf.setInt(
-        CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
-    clientConf.setInt(
-        CommonConfigurationKeys.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY, 100);
-
-    cluster.addRouterOverrides(routerConf);
-    // override some settings for the client
-    cluster.startCluster(clientConf);
-    cluster.startRouters();
-    cluster.waitClusterUp();
-
-    nnContext1 = cluster.getNamenode(cluster.getNameservices().get(0), null);
-    routerContext = cluster.getRandomRouter();
-    resolver = (MembershipNamenodeResolver) routerContext.getRouter()
-        .getNamenodeResolver();
-    routerProtocol = routerContext.getClient().getNamenode();
-  }
-
-  @After
-  public void tearDown() {
-    if (cluster != null) {
-      cluster.stopRouter(routerContext);
-      cluster.shutdown();
-      cluster = null;
-    }
-  }
-
-  @Test
-  public void testRetryWhenAllNameServiceDown() throws Exception {
-    // shutdown the dfs cluster
-    MiniDFSCluster dfsCluster = cluster.getCluster();
-    dfsCluster.shutdown();
-
-    // register an invalid namenode report
-    registerInvalidNameReport();
-
-    // Create a directory via the router
-    String dirPath = "/testRetryWhenClusterisDown";
-    FsPermission permission = new FsPermission("705");
-    try {
-      routerProtocol.mkdirs(dirPath, permission, false);
-      fail("Should have thrown RemoteException error.");
-    } catch (RemoteException e) {
-      String ns0 = cluster.getNameservices().get(0);
-      GenericTestUtils.assertExceptionContains(
-          "No namenode available under nameservice " + ns0, e);
-    }
-
-    // Verify the retry count; it should retry only once.
-    FederationRPCMetrics rpcMetrics = routerContext.getRouter()
-        .getRpcServer().getRPCMetrics();
-    assertEquals(1, rpcMetrics.getProxyOpRetries());
-  }
-
-  @Test
-  public void testRetryWhenOneNameServiceDown() throws Exception {
-    // shutdown the dfs cluster
-    MiniDFSCluster dfsCluster = cluster.getCluster();
-    dfsCluster.shutdownNameNode(0);
-
-    // register an invalid namenode report
-    registerInvalidNameReport();
-
-    DFSClient client = nnContext1.getClient();
-    // Renew the lease for the DFS client; it should succeed.
-    routerProtocol.renewLease(client.getClientName());
-
-    // Verify the retry count; it should retry once for ns0.
-    FederationRPCMetrics rpcMetrics = routerContext.getRouter()
-        .getRpcServer().getRPCMetrics();
-    assertEquals(1, rpcMetrics.getProxyOpRetries());
-  }
-
-  /**
-   * Register an invalid namenode report.
-   * @throws IOException
-   */
-  private void registerInvalidNameReport() throws IOException {
-    String ns0 = cluster.getNameservices().get(0);
-    List<? extends FederationNamenodeContext> origin = resolver
-        .getNamenodesForNameserviceId(ns0);
-    FederationNamenodeContext nnInfo = origin.get(0);
-    NamenodeStatusReport report = new NamenodeStatusReport(ns0,
-        nnInfo.getNamenodeId(), nnInfo.getRpcAddress(),
-        nnInfo.getServiceAddress(), nnInfo.getLifelineAddress(),
-        nnInfo.getWebAddress());
-    report.setRegistrationValid(false);
-    assertTrue(resolver.registerNamenode(report));
-    resolver.loadCache(true);
-  }
-}
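
Note: the availability trick used above, overwrite a namenode's registration with a report marked invalid, then reload the membership cache, for reference. A minimal sketch with the same calls as registerInvalidNameReport(); the class name is made up:

import java.io.IOException;
import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;

public final class InvalidReportSketch {
  /** Make "ns0" look namenode-less so the Router RPC client must retry. */
  public static void invalidate(MembershipNamenodeResolver resolver)
      throws IOException {
    FederationNamenodeContext nn =
        resolver.getNamenodesForNameserviceId("ns0").get(0);
    NamenodeStatusReport report = new NamenodeStatusReport("ns0",
        nn.getNamenodeId(), nn.getRpcAddress(), nn.getServiceAddress(),
        nn.getLifelineAddress(), nn.getWebAddress());
    report.setRegistrationValid(false);
    resolver.registerNamenode(report);
    resolver.loadCache(true);  // refresh the membership cache
  }

  private InvalidReportSketch() {}
}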

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aa34324/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
deleted file mode 100644
index 9bcbcad..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ /dev/null
@@ -1,898 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.federation.router;
-
-import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.addDirectory;
-import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.countContents;
-import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile;
-import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.deleteFile;
-import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.getFileStatus;
-import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists;
-import static org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.TEST_STRING;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.net.URISyntaxException;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.CryptoProtocolVersion;
-import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
-import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster;
-import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.NamenodeContext;
-import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
-import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
-import org.apache.hadoop.io.EnumSetWritable;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.service.Service.STATE;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Supplier;
-import com.google.common.collect.Maps;
-
-/**
- * Tests the RPC interface of the {@link Router} implemented by
- * {@link RouterRpcServer}.
- */
-public class TestRouterRpc {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestRouterRpc.class);
-
-  /** Federated HDFS cluster. */
-  private static RouterDFSCluster cluster;
-
-  /** Random Router for this federated cluster. */
-  private RouterContext router;
-
-  /** Random nameservice in the federated cluster.  */
-  private String ns;
-  /** First namenode in the nameservice. */
-  private NamenodeContext namenode;
-
-  /** Client interface to the Router. */
-  private ClientProtocol routerProtocol;
-  /** Client interface to the Namenode. */
-  private ClientProtocol nnProtocol;
-
-  /** Filesystem interface to the Router. */
-  private FileSystem routerFS;
-  /** Filesystem interface to the Namenode. */
-  private FileSystem nnFS;
-
-  /** File in the Router. */
-  private String routerFile;
-  /** File in the Namenode. */
-  private String nnFile;
-
-
-  @BeforeClass
-  public static void globalSetUp() throws Exception {
-    cluster = new RouterDFSCluster(false, 2);
-
-    // Start NNs and DNs and wait until ready
-    cluster.startCluster();
-
-    // Start routers with only an RPC service
-    cluster.addRouterOverrides((new RouterConfigBuilder()).rpc().build());
-    cluster.startRouters();
-
-    // Register and verify all NNs with all routers
-    cluster.registerNamenodes();
-    cluster.waitNamenodeRegistration();
-  }
-
-  @AfterClass
-  public static void tearDown() {
-    cluster.shutdown();
-  }
-
-  @Before
-  public void testSetup() throws Exception {
-
-    // Create mock locations
-    cluster.installMockLocations();
-
-    // Delete all files via the NNs and verify
-    cluster.deleteAllFiles();
-
-    // Create test fixtures on NN
-    cluster.createTestDirectoriesNamenode();
-
-    // Wait to ensure NN has fully created its test directories
-    Thread.sleep(100);
-
-    // Default namenode and random router for this test
-    this.router = cluster.getRandomRouter();
-    this.ns = cluster.getNameservices().get(0);
-    this.namenode = cluster.getNamenode(ns, null);
-
-    // Handles to the ClientProtocol interface
-    this.routerProtocol = router.getClient().getNamenode();
-    this.nnProtocol = namenode.getClient().getNamenode();
-
-    // Handles to the filesystem client
-    this.nnFS = namenode.getFileSystem();
-    this.routerFS = router.getFileSystem();
-
-    // Create a test file on the NN
-    Random r = new Random();
-    String randomFile = "testfile-" + r.nextInt();
-    this.nnFile =
-        cluster.getNamenodeTestDirectoryForNS(ns) + "/" + randomFile;
-    this.routerFile =
-        cluster.getFederatedTestDirectoryForNS(ns) + "/" + randomFile;
-
-    createFile(nnFS, nnFile, 32);
-    verifyFileExists(nnFS, nnFile);
-  }
-
-  @Test
-  public void testRpcService() throws IOException {
-    Router testRouter = new Router();
-    List<String> nss = cluster.getNameservices();
-    String ns0 = nss.get(0);
-    Configuration routerConfig = cluster.generateRouterConfiguration(ns0, null);
-    RouterRpcServer server = new RouterRpcServer(routerConfig, testRouter,
-        testRouter.getNamenodeResolver(), testRouter.getSubclusterResolver());
-    server.init(routerConfig);
-    assertEquals(STATE.INITED, server.getServiceState());
-    server.start();
-    assertEquals(STATE.STARTED, server.getServiceState());
-    server.stop();
-    assertEquals(STATE.STOPPED, server.getServiceState());
-    server.close();
-    testRouter.close();
-  }
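
The assertions above walk hadoop-common's standard service state machine
(NOTINITED -> INITED -> STARTED -> STOPPED), which RouterRpcServer inherits.
A minimal standalone sketch of that state machine, assuming only
hadoop-common on the classpath; the class name is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.service.AbstractService;

    /** Minimal sketch of the service lifecycle exercised above. */
    public class LifecycleSketch extends AbstractService {
      public LifecycleSketch() {
        super("LifecycleSketch");
      }

      public static void main(String[] args) {
        LifecycleSketch s = new LifecycleSketch();
        System.out.println(s.getServiceState()); // NOTINITED
        s.init(new Configuration());
        System.out.println(s.getServiceState()); // INITED
        s.start();
        System.out.println(s.getServiceState()); // STARTED
        s.stop();
        System.out.println(s.getServiceState()); // STOPPED
      }
    }
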
-
-  protected RouterDFSCluster getCluster() {
-    return TestRouterRpc.cluster;
-  }
-
-  protected RouterContext getRouterContext() {
-    return this.router;
-  }
-
-  protected void setRouter(RouterContext r)
-      throws IOException, URISyntaxException {
-    this.router = r;
-    this.routerProtocol = r.getClient().getNamenode();
-    this.routerFS = r.getFileSystem();
-  }
-
-  protected FileSystem getRouterFileSystem() {
-    return this.routerFS;
-  }
-
-  protected FileSystem getNamenodeFileSystem() {
-    return this.nnFS;
-  }
-
-  protected ClientProtocol getRouterProtocol() {
-    return this.routerProtocol;
-  }
-
-  protected ClientProtocol getNamenodeProtocol() {
-    return this.nnProtocol;
-  }
-
-  protected NamenodeContext getNamenode() {
-    return this.namenode;
-  }
-
-  protected void setNamenodeFile(String filename) {
-    this.nnFile = filename;
-  }
-
-  protected String getNamenodeFile() {
-    return this.nnFile;
-  }
-
-  protected void setRouterFile(String filename) {
-    this.routerFile = filename;
-  }
-
-  protected String getRouterFile() {
-    return this.routerFile;
-  }
-
-  protected void setNamenode(NamenodeContext nn)
-      throws IOException, URISyntaxException {
-    this.namenode = nn;
-    this.nnProtocol = nn.getClient().getNamenode();
-    this.nnFS = nn.getFileSystem();
-  }
-
-  protected String getNs() {
-    return this.ns;
-  }
-
-  protected void setNs(String nameservice) {
-    this.ns = nameservice;
-  }
-
-  protected static void compareResponses(
-      ClientProtocol protocol1, ClientProtocol protocol2,
-      Method m, Object[] paramList) {
-
-    Object return1 = null;
-    Exception exception1 = null;
-    try {
-      return1 = m.invoke(protocol1, paramList);
-    } catch (Exception ex) {
-      exception1 = ex;
-    }
-
-    Object return2 = null;
-    Exception exception2 = null;
-    try {
-      return2 = m.invoke(protocol2, paramList);
-    } catch (Exception ex) {
-      exception2 = ex;
-    }
-
-    assertEquals(return1, return2);
-    if (exception1 == null && exception2 == null) {
-      return;
-    }
-
-    // At least one call failed: both must have failed, with the same cause.
-    assertNotNull(exception1);
-    assertNotNull(exception2);
-    assertEquals(
-        exception1.getCause().getClass(),
-        exception2.getCause().getClass());
-  }
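
compareResponses captures this file's core idiom: invoke the same
ClientProtocol method against the Router and against a Namenode, then require
identical results or identical failure causes. A self-contained sketch of the
same idiom, with hypothetical names and no JUnit dependency:

    import java.lang.reflect.Method;
    import java.util.Objects;

    /** Sketch of the invoke-both-endpoints idiom used by compareResponses. */
    public final class OutcomeComparer {

      /**
       * Returns true when both invocations succeed with equal results, or
       * both fail with the same underlying cause type.
       */
      public static boolean sameOutcome(
          Object endpoint1, Object endpoint2, Method m, Object... args) {
        Object result1 = null;
        Object result2 = null;
        Throwable cause1 = null;
        Throwable cause2 = null;
        try {
          result1 = m.invoke(endpoint1, args);
        } catch (ReflectiveOperationException e) {
          // InvocationTargetException wraps the remote failure
          cause1 = (e.getCause() != null) ? e.getCause() : e;
        }
        try {
          result2 = m.invoke(endpoint2, args);
        } catch (ReflectiveOperationException e) {
          cause2 = (e.getCause() != null) ? e.getCause() : e;
        }
        if (cause1 != null || cause2 != null) {
          return cause1 != null && cause2 != null
              && cause1.getClass() == cause2.getClass();
        }
        return Objects.equals(result1, result2);
      }
    }
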
-
-  @Test
-  public void testProxyListFiles() throws IOException, InterruptedException,
-      URISyntaxException, NoSuchMethodException, SecurityException {
-
-    // Verify that the root listing is a union of the mount table destinations
-    // and the files stored at all nameservices mounted at the root (ns0 + ns1)
-    //
-    // / -->
-    // /ns0 (from mount table)
-    // /ns1 (from mount table)
-    // all items in / of ns0 (default NS)
-
-    // Collect the mount table entries from the root mount point
-    Set<String> requiredPaths = new TreeSet<>();
-    FileSubclusterResolver fileResolver =
-        router.getRouter().getSubclusterResolver();
-    for (String mount : fileResolver.getMountPoints("/")) {
-      requiredPaths.add(mount);
-    }
-
-    // Collect all files/dirs on the root path of the default NS
-    String defaultNs = cluster.getNameservices().get(0);
-    NamenodeContext nn = cluster.getNamenode(defaultNs, null);
-    FileStatus[] files = nn.getFileSystem().listStatus(new Path("/"));
-    for (FileStatus file : files) {
-      requiredPaths.add(file.getPath().getName());
-    }
-
-    // Fetch listing
-    DirectoryListing listing =
-        routerProtocol.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
-    Iterator<String> requiredPathsIterator = requiredPaths.iterator();
-    // Match each path returned and verify order returned
-    for (HdfsFileStatus f : listing.getPartialListing()) {
-      String fileName = requiredPathsIterator.next();
-      String currentFile = f.getFullPath(new Path("/")).getName();
-      assertEquals(currentFile, fileName);
-    }
-
-    // Verify the total number of results found/matched
-    assertEquals(requiredPaths.size(), listing.getPartialListing().length);
-
-    // List a path that doesn't exist and validate error response with NN
-    // behavior.
-    Method m = ClientProtocol.class.getMethod(
-        "getListing", String.class, byte[].class, boolean.class);
-    String badPath = "/unknownlocation/unknowndir";
-    compareResponses(routerProtocol, nnProtocol, m,
-        new Object[] {badPath, HdfsFileStatus.EMPTY_NAME, false});
-  }
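
In other words, the expected root listing is the sorted union of the mount
table entries and whatever already exists in / of the default nameservice. A
tiny worked example of that set arithmetic (values hypothetical):

    import java.util.TreeSet;

    /** Worked example: the router's root listing as a sorted union. */
    public class RootListingExample {
      public static void main(String[] args) {
        TreeSet<String> expected = new TreeSet<>();
        expected.add("ns0");      // mount point from the mount table
        expected.add("ns1");      // mount point from the mount table
        expected.add("testdir");  // real entry in / of the default nameservice
        // TreeSet iteration order matches the listing order the test checks.
        System.out.println(expected); // [ns0, ns1, testdir]
      }
    }
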
-
-  @Test
-  public void testProxyListFilesWithConflict()
-      throws IOException, InterruptedException {
-
-    // Add a directory to the namespace that conflicts with a mount point
-    NamenodeContext nn = cluster.getNamenode(ns, null);
-    FileSystem nnFs = nn.getFileSystem();
-    addDirectory(nnFs, cluster.getFederatedTestDirectoryForNS(ns));
-
-    FileSystem routerFs = router.getFileSystem();
-    int initialCount = countContents(routerFs, "/");
-
-    // The root file system now looks like this for NS X:
-    // / ->
-    // /ns0 (mount table)
-    // /ns1 (mount table)
-    // /target-ns0 (the target folder for NS0, mapped to /)
-    // /nsX (local directory that duplicates a mount table entry)
-    int newCount = countContents(routerFs, "/");
-    assertEquals(initialCount, newCount);
-
-    // Verify that each root path is readable and contains one test directory
-    assertEquals(1, countContents(routerFs, cluster.getFederatedPathForNS(ns)));
-
-    // Verify that real folder for the ns contains a single test directory
-    assertEquals(1, countContents(nnFs, cluster.getNamenodePathForNS(ns)));
-
-  }
-
-  protected void testRename(RouterContext testRouter, String filename,
-      String renamedFile, boolean exceptionExpected) throws IOException {
-
-    createFile(testRouter.getFileSystem(), filename, 32);
-    // verify
-    verifyFileExists(testRouter.getFileSystem(), filename);
-    // rename
-    boolean exceptionThrown = false;
-    try {
-      DFSClient client = testRouter.getClient();
-      ClientProtocol clientProtocol = client.getNamenode();
-      clientProtocol.rename(filename, renamedFile);
-    } catch (Exception ex) {
-      exceptionThrown = true;
-    }
-    if (exceptionExpected) {
-      // Error was expected
-      assertTrue(exceptionThrown);
-      FileContext fileContext = testRouter.getFileContext();
-      assertTrue(fileContext.delete(new Path(filename), true));
-    } else {
-      // No error was expected
-      assertFalse(exceptionThrown);
-      // verify
-      assertTrue(verifyFileExists(testRouter.getFileSystem(), renamedFile));
-      // delete
-      FileContext fileContext = testRouter.getFileContext();
-      assertTrue(fileContext.delete(new Path(renamedFile), true));
-    }
-  }
-
-  protected void testRename2(RouterContext testRouter, String filename,
-      String renamedFile, boolean exceptionExpected) throws IOException {
-    createFile(testRouter.getFileSystem(), filename, 32);
-    // verify
-    verifyFileExists(testRouter.getFileSystem(), filename);
-    // rename
-    boolean exceptionThrown = false;
-    try {
-      DFSClient client = testRouter.getClient();
-      ClientProtocol clientProtocol = client.getNamenode();
-      clientProtocol.rename2(filename, renamedFile, new Options.Rename[] {});
-    } catch (Exception ex) {
-      exceptionThrown = true;
-    }
-    assertEquals(exceptionExpected, exceptionThrown);
-    if (exceptionExpected) {
-      // Error was expected
-      FileContext fileContext = testRouter.getFileContext();
-      assertTrue(fileContext.delete(new Path(filename), true));
-    } else {
-      // verify
-      assertTrue(verifyFileExists(testRouter.getFileSystem(), renamedFile));
-      // delete
-      FileContext fileContext = testRouter.getFileContext();
-      assertTrue(fileContext.delete(new Path(renamedFile), true));
-    }
-  }
-
-  @Test
-  public void testProxyRenameFiles() throws IOException, InterruptedException {
-
-    Thread.sleep(5000);
-    List<String> nss = cluster.getNameservices();
-    String ns0 = nss.get(0);
-    String ns1 = nss.get(1);
-
-    // Rename within the same namespace
-    // /ns0/testdir/testrename -> /ns0/testdir/testrename-append
-    String filename =
-        cluster.getFederatedTestDirectoryForNS(ns0) + "/testrename";
-    String renamedFile = filename + "-append";
-    testRename(router, filename, renamedFile, false);
-    testRename2(router, filename, renamedFile, false);
-
-    // Rename a file to a destination that is in a different namespace (fails)
-    filename = cluster.getFederatedTestDirectoryForNS(ns0) + "/testrename";
-    renamedFile = cluster.getFederatedTestDirectoryForNS(ns1) + "/testrename";
-    testRename(router, filename, renamedFile, true);
-    testRename2(router, filename, renamedFile, true);
-  }
-
-  @Test
-  public void testProxyChownFiles() throws Exception {
-
-    String newUsername = "TestUser";
-    String newGroup = "TestGroup";
-
-    // change owner
-    routerProtocol.setOwner(routerFile, newUsername, newGroup);
-
-    // Verify with NN
-    FileStatus file = getFileStatus(namenode.getFileSystem(), nnFile);
-    assertEquals(file.getOwner(), newUsername);
-    assertEquals(file.getGroup(), newGroup);
-
-    // Bad request and validate router response matches NN response.
-    Method m = ClientProtocol.class.getMethod("setOwner", String.class,
-        String.class, String.class);
-    String badPath = "/unknownlocation/unknowndir";
-    compareResponses(routerProtocol, nnProtocol, m,
-        new Object[] {badPath, newUsername, newGroup});
-  }
-
-  @Test
-  public void testProxyGetStats() throws Exception {
-    // Some of the statistics are out of sync because of the mini cluster
-    Supplier<Boolean> check = new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        try {
-          long[] combinedData = routerProtocol.getStats();
-          long[] individualData = getAggregateStats();
-          int len = Math.min(combinedData.length, individualData.length);
-          for (int i = 0; i < len; i++) {
-            if (combinedData[i] != individualData[i]) {
-              LOG.error("Stats for {} don't match: {} != {}",
-                  i, combinedData[i], individualData[i]);
-              return false;
-            }
-          }
-          return true;
-        } catch (Exception e) {
-          LOG.error("Cannot get stats: {}", e.getMessage());
-          return false;
-        }
-      }
-    };
-    GenericTestUtils.waitFor(check, 500, 5 * 1000);
-  }
-
-  /**
-   * Get the sum of each subcluster statistics.
-   * @return Aggregated statistics.
-   * @throws Exception If it cannot get the stats from the Router or Namenode.
-   */
-  private long[] getAggregateStats() throws Exception {
-    long[] individualData = new long[10];
-    for (String nameservice : cluster.getNameservices()) {
-      NamenodeContext n = cluster.getNamenode(nameservice, null);
-      DFSClient client = n.getClient();
-      ClientProtocol clientProtocol = client.getNamenode();
-      long[] data = clientProtocol.getStats();
-      for (int i = 0; i < data.length; i++) {
-        individualData[i] += data[i];
-      }
-    }
-    return individualData;
-  }
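
The aggregation above is a plain element-wise vector sum over the getStats()
arrays of each subcluster. As a standalone illustration with made-up numbers:

    /** Element-wise sum of per-subcluster stats vectors (hypothetical data). */
    public class StatsSum {
      public static void main(String[] args) {
        long[][] perNamespace = {
            {1000L, 600L, 400L},  // e.g. capacity, used, remaining for ns0
            {2000L, 500L, 1500L}, // and for ns1
        };
        long[] total = new long[3];
        for (long[] stats : perNamespace) {
          for (int i = 0; i < stats.length; i++) {
            total[i] += stats[i];
          }
        }
        // total == {3000, 1100, 1900}: what the router should report
        System.out.println(java.util.Arrays.toString(total));
      }
    }
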
-
-  @Test
-  public void testProxyGetDatanodeReport() throws Exception {
-
-    DatanodeInfo[] combinedData =
-        routerProtocol.getDatanodeReport(DatanodeReportType.ALL);
-
-    Set<Integer> individualData = new HashSet<Integer>();
-    for (String nameservice : cluster.getNameservices()) {
-      NamenodeContext n = cluster.getNamenode(nameservice, null);
-      DFSClient client = n.getClient();
-      ClientProtocol clientProtocol = client.getNamenode();
-      DatanodeInfo[] data =
-          clientProtocol.getDatanodeReport(DatanodeReportType.ALL);
-      for (int i = 0; i < data.length; i++) {
-        // Collect unique DNs based on their xfer port
-        DatanodeInfo info = data[i];
-        individualData.add(info.getXferPort());
-      }
-    }
-    assertEquals(combinedData.length, individualData.size());
-  }
-
-  @Test
-  public void testProxyGetDatanodeStorageReport()
-      throws IOException, InterruptedException, URISyntaxException {
-
-    DatanodeStorageReport[] combinedData =
-        routerProtocol.getDatanodeStorageReport(DatanodeReportType.ALL);
-
-    Set<String> individualData = new HashSet<>();
-    for (String nameservice : cluster.getNameservices()) {
-      NamenodeContext n = cluster.getNamenode(nameservice, null);
-      DFSClient client = n.getClient();
-      ClientProtocol clientProtocol = client.getNamenode();
-      DatanodeStorageReport[] data =
-          clientProtocol.getDatanodeStorageReport(DatanodeReportType.ALL);
-      for (DatanodeStorageReport report : data) {
-        // Determine unique DN instances
-        DatanodeInfo dn = report.getDatanodeInfo();
-        individualData.add(dn.toString());
-      }
-    }
-    assertEquals(combinedData.length, individualData.size());
-  }
-
-  @Test
-  public void testProxyMkdir() throws Exception {
-
-    // Check the initial folders
-    FileStatus[] filesInitial = routerFS.listStatus(new Path("/"));
-
-    // Create a directory via the router at the root level
-    String dirPath = "/testdir";
-    FsPermission permission = new FsPermission("705");
-    routerProtocol.mkdirs(dirPath, permission, false);
-
-    // Verify the root listing has the item via the router
-    FileStatus[] files = routerFS.listStatus(new Path("/"));
-    assertEquals(Arrays.toString(files) + " should be " +
-        Arrays.toString(filesInitial) + " + " + dirPath,
-        filesInitial.length + 1, files.length);
-    assertTrue(verifyFileExists(routerFS, dirPath));
-
-    // Verify the directory is present in only 1 Namenode
-    int foundCount = 0;
-    for (NamenodeContext n : cluster.getNamenodes()) {
-      if (verifyFileExists(n.getFileSystem(), dirPath)) {
-        foundCount++;
-      }
-    }
-    assertEquals(1, foundCount);
-    assertTrue(deleteFile(routerFS, dirPath));
-
-    // Validate router failure response matches NN failure response.
-    Method m = ClientProtocol.class.getMethod("mkdirs", String.class,
-        FsPermission.class, boolean.class);
-    String badPath = "/unknownlocation/unknowndir";
-    compareResponses(routerProtocol, nnProtocol, m,
-        new Object[] {badPath, permission, false});
-  }
-
-  @Test
-  public void testProxyChmodFiles() throws Exception {
-
-    FsPermission permission = new FsPermission("444");
-
-    // change permissions
-    routerProtocol.setPermission(routerFile, permission);
-
-    // Validate permissions NN
-    FileStatus file = getFileStatus(namenode.getFileSystem(), nnFile);
-    assertEquals(permission, file.getPermission());
-
-    // Validate router failure response matches NN failure response.
-    Method m = ClientProtocol.class.getMethod(
-        "setPermission", String.class, FsPermission.class);
-    String badPath = "/unknownlocation/unknowndir";
-    compareResponses(routerProtocol, nnProtocol, m,
-        new Object[] {badPath, permission});
-  }
-
-  @Test
-  public void testProxySetReplication() throws Exception {
-
-    // Check current replication via NN
-    FileStatus file = getFileStatus(nnFS, nnFile);
-    assertEquals(1, file.getReplication());
-
-    // increment replication via router
-    routerProtocol.setReplication(routerFile, (short) 2);
-
-    // Verify via NN
-    file = getFileStatus(nnFS, nnFile);
-    assertEquals(2, file.getReplication());
-
-    // Validate router failure response matches NN failure response.
-    Method m = ClientProtocol.class.getMethod(
-        "setReplication", String.class, short.class);
-    String badPath = "/unknownlocation/unknowndir";
-    compareResponses(routerProtocol, nnProtocol, m,
-        new Object[] {badPath, (short) 2});
-  }
-
-  @Test
-  public void testProxyTruncateFile() throws Exception {
-
-    // Check file size via NN
-    FileStatus file = getFileStatus(nnFS, nnFile);
-    assertTrue(file.getLen() > 0);
-
-    // Truncate to 0 bytes via router
-    routerProtocol.truncate(routerFile, 0, "testclient");
-
-    // Verify via NN
-    file = getFileStatus(nnFS, nnFile);
-    assertEquals(0, file.getLen());
-
-    // Validate router failure response matches NN failure response.
-    Method m = ClientProtocol.class.getMethod(
-        "truncate", String.class, long.class, String.class);
-    String badPath = "/unknownlocation/unknowndir";
-    compareResponses(routerProtocol, nnProtocol, m,
-        new Object[] {badPath, (long) 0, "testclient"});
-  }
-
-  @Test
-  public void testProxyGetBlockLocations() throws Exception {
-
-    // Fetch block locations via router
-    LocatedBlocks locations =
-        routerProtocol.getBlockLocations(routerFile, 0, 1024);
-    assertEquals(1, locations.getLocatedBlocks().size());
-
-    // Validate router failure response matches NN failure response.
-    Method m = ClientProtocol.class.getMethod(
-        "getBlockLocations", String.class, long.class, long.class);
-    String badPath = "/unknownlocation/unknowndir";
-    compareResponses(routerProtocol, nnProtocol,
-        m, new Object[] {badPath, (long) 0, (long) 0});
-  }
-
-  @Test
-  public void testProxyStoragePolicy() throws Exception {
-
-    // Query initial policy via NN
-    HdfsFileStatus status = namenode.getClient().getFileInfo(nnFile);
-
-    // Set a random policy via router
-    BlockStoragePolicy[] policies = namenode.getClient().getStoragePolicies();
-    BlockStoragePolicy policy = policies[0];
-
-    while (policy.isCopyOnCreateFile()) {
-      // Pick a non copy on create policy
-      Random rand = new Random();
-      int randIndex = rand.nextInt(policies.length);
-      policy = policies[randIndex];
-    }
-    routerProtocol.setStoragePolicy(routerFile, policy.getName());
-
-    // Verify policy via NN
-    HdfsFileStatus newStatus = namenode.getClient().getFileInfo(nnFile);
-    assertTrue(newStatus.getStoragePolicy() == policy.getId());
-    assertTrue(newStatus.getStoragePolicy() != status.getStoragePolicy());
-
-    // Validate router failure response matches NN failure response.
-    Method m = ClientProtocol.class.getMethod("setStoragePolicy", String.class,
-        String.class);
-    String badPath = "/unknownlocation/unknowndir";
-    compareResponses(routerProtocol, nnProtocol,
-        m, new Object[] {badPath, "badpolicy"});
-  }
-
-  @Test
-  public void testProxyGetPreferredBlockSize() throws Exception {
-
-    // Query via NN and Router and verify
-    long namenodeSize = nnProtocol.getPreferredBlockSize(nnFile);
-    long routerSize = routerProtocol.getPreferredBlockSize(routerFile);
-    assertEquals(routerSize, namenodeSize);
-
-    // Validate router failure response matches NN failure response.
-    Method m = ClientProtocol.class.getMethod(
-        "getPreferredBlockSize", String.class);
-    String badPath = "/unknownlocation/unknowndir";
-    compareResponses(
-        routerProtocol, nnProtocol, m, new Object[] {badPath});
-  }
-
-  private void testConcat(
-      String source, String target, boolean failureExpected) {
-    boolean failure = false;
-    try {
-      // Concat the test file with the full-block-length file via the router
-      routerProtocol.concat(target, new String[] {source});
-    } catch (IOException ex) {
-      failure = true;
-    }
-    assertEquals(failureExpected, failure);
-  }
-
-  @Test
-  public void testProxyConcatFile() throws Exception {
-
-    // Create a stub file in the primary ns
-    String sameNameservice = ns;
-    String existingFile =
-        cluster.getFederatedTestDirectoryForNS(sameNameservice) +
-        "_concatfile";
-    int existingFileSize = 32;
-    createFile(routerFS, existingFile, existingFileSize);
-
-    // Identify an alternate nameservice that doesn't match the existing file
-    String alternateNameservice = null;
-    for (String n : cluster.getNameservices()) {
-      if (!n.equals(sameNameservice)) {
-        alternateNameservice = n;
-        break;
-      }
-    }
-
-    // Create new files, must be a full block to use concat. One file is in the
-    // same namespace as the target file, the other is in a different namespace.
-    String altRouterFile =
-        cluster.getFederatedTestDirectoryForNS(alternateNameservice) +
-        "_newfile";
-    String sameRouterFile =
-        cluster.getFederatedTestDirectoryForNS(sameNameservice) +
-        "_newfile";
-    createFile(routerFS, altRouterFile, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
-    createFile(routerFS, sameRouterFile, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
-
-    // Concat in different namespaces, fails
-    testConcat(existingFile, altRouterFile, true);
-
-    // Concat in same namespaces, succeeds
-    testConcat(existingFile, sameRouterFile, false);
-
-    // Check target file length
-    FileStatus status = getFileStatus(routerFS, sameRouterFile);
-    assertEquals(
-        existingFileSize + DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT,
-        status.getLen());
-
-    // Validate router failure response matches NN failure response.
-    Method m = ClientProtocol.class.getMethod(
-        "concat", String.class, String[].class);
-    String badPath = "/unknownlocation/unknowndir";
-    compareResponses(routerProtocol, nnProtocol, m,
-        new Object[] {badPath, new String[] {routerFile}});
-  }
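
As a worked check of the final length assertion: concat appends the 32-byte
stub onto the full-block file, so the target should grow to one default block
plus 32 bytes (assuming the stock 128 MB default block size):

    /** Worked arithmetic behind the concat length assertion. */
    public class ConcatLengthCheck {
      public static void main(String[] args) {
        long existingFileSize = 32L;          // the stub file created first
        long blockSize = 128L * 1024 * 1024;  // assumed 128 MB default block
        // 134217760 bytes: the length the test expects on the target file
        System.out.println(existingFileSize + blockSize);
      }
    }
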
-
-  @Test
-  public void testProxyAppend() throws Exception {
-
-    // Append a test string via router
-    EnumSet<CreateFlag> createFlag = EnumSet.of(CreateFlag.APPEND);
-    DFSClient routerClient = getRouterContext().getClient();
-    HdfsDataOutputStream stream =
-        routerClient.append(routerFile, 1024, createFlag, null, null);
-    stream.writeBytes(TEST_STRING);
-    stream.close();
-
-    // Verify file size via NN
-    FileStatus status = getFileStatus(nnFS, nnFile);
-    assertTrue(status.getLen() > TEST_STRING.length());
-
-    // Validate router failure response matches NN failure response.
-    Method m = ClientProtocol.class.getMethod("append", String.class,
-        String.class, EnumSetWritable.class);
-    String badPath = "/unknownlocation/unknowndir";
-    EnumSetWritable<CreateFlag> createFlagWritable =
-        new EnumSetWritable<CreateFlag>(createFlag);
-    compareResponses(routerProtocol, nnProtocol, m,
-        new Object[] {badPath, "testClient", createFlagWritable});
-  }
-
-  @Test
-  public void testProxyGetAdditionalDatanode()
-      throws IOException, InterruptedException, URISyntaxException {
-
-    // Use primitive APIs to open a file, add a block, and get datanode location
-    EnumSet<CreateFlag> createFlag = EnumSet.of(CreateFlag.CREATE);
-    String clientName = getRouterContext().getClient().getClientName();
-    String newRouterFile = routerFile + "_additionalDatanode";
-    HdfsFileStatus status = routerProtocol.create(
-        newRouterFile, new FsPermission("777"), clientName,
-        new EnumSetWritable<CreateFlag>(createFlag), true, (short) 1,
-        (long) 1024, CryptoProtocolVersion.supported());
-
-    // Add a block via router (requires client to have same lease)
-    LocatedBlock block = routerProtocol.addBlock(
-        newRouterFile, clientName, null, null,
-        status.getFileId(), null, null);
-
-    DatanodeInfo[] exclusions = new DatanodeInfo[0];
-    LocatedBlock newBlock = routerProtocol.getAdditionalDatanode(
-        newRouterFile, status.getFileId(), block.getBlock(),
-        block.getLocations(), block.getStorageIDs(), exclusions, 1, clientName);
-    assertNotNull(newBlock);
-  }
-
-  @Test
-  public void testProxyCreateFileAlternateUser()
-      throws IOException, URISyntaxException, InterruptedException {
-
-    // Create via Router
-    String routerDir = cluster.getFederatedTestDirectoryForNS(ns);
-    String namenodeDir = cluster.getNamenodeTestDirectoryForNS(ns);
-    String newRouterFile = routerDir + "/unknownuser";
-    String newNamenodeFile = namenodeDir + "/unknownuser";
-    String username = "unknownuser";
-
-    // Allow all user access to dir
-    namenode.getFileContext().setPermission(
-        new Path(namenodeDir), new FsPermission("777"));
-
-    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(username);
-    DFSClient client = getRouterContext().getClient(ugi);
-    client.create(newRouterFile, true);
-
-    // Fetch via NN and check user
-    FileStatus status = getFileStatus(nnFS, newNamenodeFile);
-    assertEquals(status.getOwner(), username);
-  }
-
-  @Test
-  public void testProxyGetFileInfoAccessException() throws IOException {
-
-    UserGroupInformation ugi =
-        UserGroupInformation.createRemoteUser("unknownuser");
-
-    // List files from the NN and trap the exception
-    Exception nnFailure = null;
-    try {
-      String testFile = cluster.getNamenodeTestFileForNS(ns);
-      namenode.getClient(ugi).getLocatedBlocks(testFile, 0);
-    } catch (Exception e) {
-      nnFailure = e;
-    }
-    assertNotNull(nnFailure);
-
-    // List files from the router and trap the exception
-    Exception routerFailure = null;
-    try {
-      String testFile = cluster.getFederatedTestFileForNS(ns);
-      getRouterContext().getClient(ugi).getLocatedBlocks(testFile, 0);
-    } catch (Exception e) {
-      routerFailure = e;
-    }
-    assertNotNull(routerFailure);
-
-    assertEquals(routerFailure.getClass(), nnFailure.getClass());
-  }
-}
\ No newline at end of file

