http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java
new file mode 100644
index 0000000..9667489
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MountTablePBImpl.java
@@ -0,0 +1,317 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.records.impl.pb;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto.DestOrder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProtoOrBuilder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoteLocationProto;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import org.apache.hadoop.hdfs.server.federation.router.RouterAdminServer;
+import org.apache.hadoop.hdfs.server.federation.router.RouterPermissionChecker;
+import org.apache.hadoop.hdfs.server.federation.router.RouterQuotaUsage;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.FederationProtocolPBTranslator;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the MountTable record.
+ */
+public class MountTablePBImpl extends MountTable implements PBRecord {
+
+  private FederationProtocolPBTranslator<MountTableRecordProto, Builder,
+      MountTableRecordProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<MountTableRecordProto, Builder,
+              MountTableRecordProtoOrBuilder>(MountTableRecordProto.class);
+
+  public MountTablePBImpl() {
+  }
+
+  public MountTablePBImpl(MountTableRecordProto proto) {
+    this.setProto(proto);
+  }
+
+  @Override
+  public MountTableRecordProto getProto() {
+    return this.translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public String getSourcePath() {
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (!proto.hasSrcPath()) {
+      return null;
+    }
+    return proto.getSrcPath();
+  }
+
+  @Override
+  public void setSourcePath(String path) {
+    Builder builder = this.translator.getBuilder();
+    if (path == null) {
+      builder.clearSrcPath();
+    } else {
+      builder.setSrcPath(path);
+    }
+  }
+
+  @Override
+  public List<RemoteLocation> getDestinations() {
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (proto.getDestinationsCount() == 0) {
+      return null;
+    }
+
+    final List<RemoteLocation> ret = new LinkedList<>();
+    final List<RemoteLocationProto> destList = proto.getDestinationsList();
+    for (RemoteLocationProto dest : destList) {
+      String nsId = dest.getNameserviceId();
+      String path = dest.getPath();
+      RemoteLocation loc = new RemoteLocation(nsId, path);
+      ret.add(loc);
+    }
+    return ret;
+  }
+
+  @Override
+  public void setDestinations(final List<RemoteLocation> dests) {
+    Builder builder = this.translator.getBuilder();
+    builder.clearDestinations();
+    for (RemoteLocation dest : dests) {
+      RemoteLocationProto.Builder itemBuilder =
+          RemoteLocationProto.newBuilder();
+      String nsId = dest.getNameserviceId();
+      String path = dest.getDest();
+      itemBuilder.setNameserviceId(nsId);
+      itemBuilder.setPath(path);
+      RemoteLocationProto item = itemBuilder.build();
+      builder.addDestinations(item);
+    }
+  }
+
+  @Override
+  public boolean addDestination(String nsId, String path) {
+    // Check if the location is already there
+    List<RemoteLocation> dests = getDestinations();
+    if (dests != null) {
+      for (RemoteLocation dest : dests) {
+        if (dest.getNameserviceId().equals(nsId)
+            && dest.getDest().equals(path)) {
+          return false;
+        }
+      }
+    }
+
+    // Add it to the existing list
+    Builder builder = this.translator.getBuilder();
+    RemoteLocationProto.Builder itemBuilder =
+        RemoteLocationProto.newBuilder();
+    itemBuilder.setNameserviceId(nsId);
+    itemBuilder.setPath(path);
+    RemoteLocationProto item = itemBuilder.build();
+    builder.addDestinations(item);
+    return true;
+  }
+
+  @Override
+  public void setDateModified(long time) {
+    this.translator.getBuilder().setDateModified(time);
+  }
+
+  @Override
+  public long getDateModified() {
+    return this.translator.getProtoOrBuilder().getDateModified();
+  }
+
+  @Override
+  public void setDateCreated(long time) {
+    this.translator.getBuilder().setDateCreated(time);
+  }
+
+  @Override
+  public long getDateCreated() {
+    return this.translator.getProtoOrBuilder().getDateCreated();
+  }
+
+  @Override
+  public boolean isReadOnly() {
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (!proto.hasReadOnly()) {
+      return false;
+    }
+    return proto.getReadOnly();
+  }
+
+  @Override
+  public void setReadOnly(boolean ro) {
+    this.translator.getBuilder().setReadOnly(ro);
+  }
+
+  @Override
+  public DestinationOrder getDestOrder() {
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    return convert(proto.getDestOrder());
+  }
+
+  @Override
+  public void setDestOrder(DestinationOrder order) {
+    Builder builder = this.translator.getBuilder();
+    if (order == null) {
+      builder.clearDestOrder();
+    } else {
+      builder.setDestOrder(convert(order));
+    }
+  }
+
+  @Override
+  public String getOwnerName() {
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (!proto.hasOwnerName()) {
+      return RouterAdminServer.getSuperUser();
+    }
+    return proto.getOwnerName();
+  }
+
+  @Override
+  public void setOwnerName(String owner) {
+    Builder builder = this.translator.getBuilder();
+    if (owner == null) {
+      builder.clearOwnerName();
+    } else {
+      builder.setOwnerName(owner);
+    }
+  }
+
+  @Override
+  public String getGroupName() {
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (!proto.hasGroupName()) {
+      return RouterAdminServer.getSuperGroup();
+    }
+    return proto.getGroupName();
+  }
+
+  @Override
+  public void setGroupName(String group) {
+    Builder builder = this.translator.getBuilder();
+    if (group == null) {
+      builder.clearGroupName();
+    } else {
+      builder.setGroupName(group);
+    }
+  }
+
+  @Override
+  public FsPermission getMode() {
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    short mode = RouterPermissionChecker.MOUNT_TABLE_PERMISSION_DEFAULT;
+    if (proto.hasMode()) {
+      mode = (short) proto.getMode();
+    }
+    return new FsPermission(mode);
+  }
+
+  @Override
+  public void setMode(FsPermission mode) {
+    Builder builder = this.translator.getBuilder();
+    if (mode == null) {
+      builder.clearMode();
+    } else {
+      builder.setMode(mode.toShort());
+    }
+  }
+
+  @Override
+  public RouterQuotaUsage getQuota() {
+    MountTableRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+
+    long nsQuota = HdfsConstants.QUOTA_DONT_SET;
+    long nsCount = RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT;
+    long ssQuota = HdfsConstants.QUOTA_DONT_SET;
+    long ssCount = RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT;
+    if (proto.hasQuota()) {
+      QuotaUsageProto quotaProto = proto.getQuota();
+      nsQuota = quotaProto.getQuota();
+      nsCount = quotaProto.getFileAndDirectoryCount();
+      ssQuota = quotaProto.getSpaceQuota();
+      ssCount = quotaProto.getSpaceConsumed();
+    }
+
+    RouterQuotaUsage.Builder builder = new RouterQuotaUsage.Builder()
+        .quota(nsQuota).fileAndDirectoryCount(nsCount).spaceQuota(ssQuota)
+        .spaceConsumed(ssCount);
+    return builder.build();
+  }
+
+  @Override
+  public void setQuota(RouterQuotaUsage quota) {
+    Builder builder = this.translator.getBuilder();
+    if (quota == null) {
+      builder.clearQuota();
+    } else {
+      QuotaUsageProto quotaUsage = QuotaUsageProto.newBuilder()
+          .setFileAndDirectoryCount(quota.getFileAndDirectoryCount())
+          .setQuota(quota.getQuota()).setSpaceConsumed(quota.getSpaceConsumed())
+          .setSpaceQuota(quota.getSpaceQuota()).build();
+      builder.setQuota(quotaUsage);
+    }
+  }
+
+  private DestinationOrder convert(DestOrder order) {
+    switch (order) {
+    case LOCAL:
+      return DestinationOrder.LOCAL;
+    case RANDOM:
+      return DestinationOrder.RANDOM;
+    case HASH_ALL:
+      return DestinationOrder.HASH_ALL;
+    default:
+      return DestinationOrder.HASH;
+    }
+  }
+
+  private DestOrder convert(DestinationOrder order) {
+    switch (order) {
+    case LOCAL:
+      return DestOrder.LOCAL;
+    case RANDOM:
+      return DestOrder.RANDOM;
+    case HASH_ALL:
+      return DestOrder.HASH_ALL;
+    default:
+      return DestOrder.HASH;
+    }
+  }
+}
\ No newline at end of file
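
As a usage illustration (not part of the patch), here is a minimal sketch of
building and serializing the record above. The mount point and nameservice
names are hypothetical, and encoding the serialized proto as base64 is an
assumption based on the readInstance() javadoc:

    import java.util.Base64;
    import java.util.Collections;

    import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto;
    import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
    import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.MountTablePBImpl;

    public class MountTableRecordExample {
      public static void main(String[] args) throws Exception {
        // The setters write through to the underlying proto builder.
        MountTablePBImpl record = new MountTablePBImpl();
        record.setSourcePath("/data");
        record.setDestinations(Collections.singletonList(
            new RemoteLocation("ns0", "/data")));
        record.setReadOnly(true);

        // getProto() builds the immutable protobuf message.
        MountTableRecordProto proto = record.getProto();

        // Round trip through the base64 form that readInstance() consumes.
        String base64 = Base64.getEncoder().encodeToString(proto.toByteArray());
        MountTablePBImpl copy = new MountTablePBImpl();
        copy.readInstance(base64);
        System.out.println(copy.getSourcePath());  // prints /data
      }
    }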

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/PBRecord.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/PBRecord.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/PBRecord.java
new file mode 100644
index 0000000..c369275
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/PBRecord.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.records.impl.pb;
+
+import java.io.IOException;
+
+import com.google.protobuf.Message;
+
+/**
+ * A record implementation using Protobuf.
+ */
+public interface PBRecord {
+
+  /**
+   * Get the protobuf message backing this record.
+   * @return The protobuf message for this record.
+   */
+  Message getProto();
+
+  /**
+   * Set the protobuf message backing this record.
+   * @param proto Protobuf message for this record.
+   */
+  void setProto(Message proto);
+
+  /**
+   * Populate this record with serialized data.
+   * @param base64String Serialized data in base64.
+   * @throws IOException If it cannot read the data.
+   */
+  void readInstance(String base64String) throws IOException;
+}
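
The pattern every record in this patch follows is the same: delegate the three
PBRecord methods to a FederationProtocolPBTranslator bound to the record's
proto type. A sketch with hypothetical types ("MyRecord", "MyRecordProto"),
mirroring the implementations elsewhere in this patch:

    // MyRecord, MyRecordProto and MyRecordProtoOrBuilder are hypothetical.
    public class MyRecordPBImpl extends MyRecord implements PBRecord {

      private FederationProtocolPBTranslator<MyRecordProto,
          MyRecordProto.Builder, MyRecordProtoOrBuilder> translator =
              new FederationProtocolPBTranslator<>(MyRecordProto.class);

      @Override
      public MyRecordProto getProto() {
        return this.translator.build();     // build the immutable message
      }

      @Override
      public void setProto(Message proto) {
        this.translator.setProto(proto);    // adopt an existing message
      }

      @Override
      public void readInstance(String base64String) throws IOException {
        this.translator.readInstance(base64String);  // parse base64 data
      }
    }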

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/RouterStatePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/RouterStatePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/RouterStatePBImpl.java
new file mode 100644
index 0000000..23a61f9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/RouterStatePBImpl.java
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.records.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RouterRecordProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RouterRecordProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RouterRecordProtoOrBuilder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.StateStoreVersionRecordProto;
+import org.apache.hadoop.hdfs.server.federation.router.RouterServiceState;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.FederationProtocolPBTranslator;
+import org.apache.hadoop.hdfs.server.federation.store.records.RouterState;
+import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the RouterState record.
+ */
+public class RouterStatePBImpl extends RouterState implements PBRecord {
+
+  private FederationProtocolPBTranslator<RouterRecordProto, Builder,
+      RouterRecordProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<RouterRecordProto, Builder,
+             RouterRecordProtoOrBuilder>(RouterRecordProto.class);
+
+  public RouterStatePBImpl() {
+  }
+
+  public RouterStatePBImpl(RouterRecordProto proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public RouterRecordProto getProto() {
+    return this.translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public void setAddress(String address) {
+    RouterRecordProto.Builder builder = this.translator.getBuilder();
+    if (address == null) {
+      builder.clearAddress();
+    } else {
+      builder.setAddress(address);
+    }
+  }
+
+  @Override
+  public String getAddress() {
+    RouterRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (!proto.hasAddress()) {
+      return null;
+    }
+    return proto.getAddress();
+  }
+
+  @Override
+  public void setStateStoreVersion(StateStoreVersion version) {
+    RouterRecordProto.Builder builder = this.translator.getBuilder();
+    if (version instanceof StateStoreVersionPBImpl) {
+      StateStoreVersionPBImpl versionPB = (StateStoreVersionPBImpl)version;
+      StateStoreVersionRecordProto versionProto =
+          (StateStoreVersionRecordProto)versionPB.getProto();
+      builder.setStateStoreVersion(versionProto);
+    } else {
+      builder.clearStateStoreVersion();
+    }
+  }
+
+  @Override
+  public StateStoreVersion getStateStoreVersion() throws IOException {
+    RouterRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (!proto.hasStateStoreVersion()) {
+      return null;
+    }
+    StateStoreVersionRecordProto versionProto = proto.getStateStoreVersion();
+    StateStoreVersion version =
+        StateStoreSerializer.newRecord(StateStoreVersion.class);
+    if (version instanceof StateStoreVersionPBImpl) {
+      StateStoreVersionPBImpl versionPB = (StateStoreVersionPBImpl)version;
+      versionPB.setProto(versionProto);
+      return versionPB;
+    } else {
+      throw new IOException("Cannot get State Store version");
+    }
+  }
+
+  @Override
+  public RouterServiceState getStatus() {
+    RouterRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (!proto.hasStatus()) {
+      return null;
+    }
+    return RouterServiceState.valueOf(proto.getStatus());
+  }
+
+  @Override
+  public void setStatus(RouterServiceState newStatus) {
+    RouterRecordProto.Builder builder = this.translator.getBuilder();
+    if (newStatus == null) {
+      builder.clearStatus();
+    } else {
+      builder.setStatus(newStatus.toString());
+    }
+  }
+
+  @Override
+  public String getVersion() {
+    RouterRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (!proto.hasVersion()) {
+      return null;
+    }
+    return proto.getVersion();
+  }
+
+  @Override
+  public void setVersion(String version) {
+    RouterRecordProto.Builder builder = this.translator.getBuilder();
+    if (version == null) {
+      builder.clearVersion();
+    } else {
+      builder.setVersion(version);
+    }
+  }
+
+  @Override
+  public String getCompileInfo() {
+    RouterRecordProtoOrBuilder proto = this.translator.getProtoOrBuilder();
+    if (!proto.hasCompileInfo()) {
+      return null;
+    }
+    return proto.getCompileInfo();
+  }
+
+  @Override
+  public void setCompileInfo(String info) {
+    RouterRecordProto.Builder builder = this.translator.getBuilder();
+    if (info == null) {
+      builder.clearCompileInfo();
+    } else {
+      builder.setCompileInfo(info);
+    }
+  }
+
+  @Override
+  public void setDateStarted(long dateStarted) {
+    this.translator.getBuilder().setDateStarted(dateStarted);
+  }
+
+  @Override
+  public long getDateStarted() {
+    return this.translator.getProtoOrBuilder().getDateStarted();
+  }
+
+  @Override
+  public void setDateModified(long time) {
+    this.translator.getBuilder().setDateModified(time);
+  }
+
+  @Override
+  public long getDateModified() {
+    return this.translator.getProtoOrBuilder().getDateModified();
+  }
+
+  @Override
+  public void setDateCreated(long time) {
+    this.translator.getBuilder().setDateCreated(time);
+  }
+
+  @Override
+  public long getDateCreated() {
+    return this.translator.getProtoOrBuilder().getDateCreated();
+  }
+}
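
A short sketch of recording and reading back a Router's state with the class
above; the address is hypothetical and RouterServiceState.RUNNING is assumed to
be one of the enum values:

    import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RouterRecordProto;
    import org.apache.hadoop.hdfs.server.federation.router.RouterServiceState;
    import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.RouterStatePBImpl;

    public class RouterStateExample {
      public static void main(String[] args) {
        RouterStatePBImpl state = new RouterStatePBImpl();
        state.setAddress("router0.example.com:8111");  // hypothetical
        state.setStatus(RouterServiceState.RUNNING);   // assumed enum value
        state.setDateStarted(System.currentTimeMillis());

        // Unset optional fields read back as null, not as empty strings.
        System.out.println(state.getVersion());  // prints null

        // The built proto can seed another record instance.
        RouterRecordProto proto = state.getProto();
        RouterStatePBImpl copy = new RouterStatePBImpl(proto);
        System.out.println(copy.getAddress() + " " + copy.getStatus());
      }
    }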

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/StateStoreVersionPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/StateStoreVersionPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/StateStoreVersionPBImpl.java
new file mode 100644
index 0000000..7696136
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/StateStoreVersionPBImpl.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.records.impl.pb;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.StateStoreVersionRecordProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.StateStoreVersionRecordProto.Builder;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.StateStoreVersionRecordProtoOrBuilder;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.FederationProtocolPBTranslator;
+import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the StateStoreVersion record.
+ */
+public class StateStoreVersionPBImpl extends StateStoreVersion
+    implements PBRecord {
+
+  private FederationProtocolPBTranslator<StateStoreVersionRecordProto, Builder,
+      StateStoreVersionRecordProtoOrBuilder> translator =
+          new FederationProtocolPBTranslator<StateStoreVersionRecordProto,
+              Builder, StateStoreVersionRecordProtoOrBuilder>(
+                  StateStoreVersionRecordProto.class);
+
+  public StateStoreVersionPBImpl() {
+  }
+
+  @Override
+  public StateStoreVersionRecordProto getProto() {
+    return this.translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+    this.translator.setProto(proto);
+  }
+
+  @Override
+  public void readInstance(String base64String) throws IOException {
+    this.translator.readInstance(base64String);
+  }
+
+  @Override
+  public long getMembershipVersion() {
+    return this.translator.getProtoOrBuilder().getMembershipVersion();
+  }
+
+  @Override
+  public void setMembershipVersion(long version) {
+    this.translator.getBuilder().setMembershipVersion(version);
+  }
+
+  @Override
+  public long getMountTableVersion() {
+    return this.translator.getProtoOrBuilder().getMountTableVersion();
+  }
+
+  @Override
+  public void setMountTableVersion(long version) {
+    this.translator.getBuilder().setMountTableVersion(version);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/package-info.java
new file mode 100644
index 0000000..b329732
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/package-info.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * The protobuf implementations of state store data records defined in the
+ * org.apache.hadoop.hdfs.server.federation.store.records package. Each
+ * implementation wraps an associated protobuf proto definition.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+package org.apache.hadoop.hdfs.server.federation.store.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/package-info.java
new file mode 100644
index 0000000..63b13af
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/package-info.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Contains the abstract definitions of the state store data records. The
+ * state store supports multiple data record types.
+ * <p>
+ * Data records inherit from a common class
+ * {@link org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord
+ * BaseRecord}. Data records are serialized when written to the data store
+ * using a modular serialization implementation. The default is protobuf
+ * serialization. Data is stored as rows of records of the same type with each
+ * data member in a record representing a column.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+
+package org.apache.hadoop.hdfs.server.federation.store.records;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java
new file mode 100644
index 0000000..89273db
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.utils;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.hadoop.io.MD5Hash;
+
+/**
+ * Consistent hash ring to distribute items across nodes (locations). If we add
+ * or remove nodes, it minimizes the item migration.
+ */
+public class ConsistentHashRing {
+  private static final String SEPERATOR = "/";
+  private static final String VIRTUAL_NODE_FORMAT = "%s" + SEPERATOR + "%d";
+
+  /** Hash ring. */
+  private SortedMap<String, String> ring = new TreeMap<String, String>();
+  /** Entry -> num virtual nodes on ring. */
+  private Map<String, Integer> entryToVirtualNodes =
+      new HashMap<String, Integer>();
+
+  /** Synchronization. */
+  private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+  private final Lock readLock = readWriteLock.readLock();
+  private final Lock writeLock = readWriteLock.writeLock();
+
+  public ConsistentHashRing(Set<String> locations) {
+    for (String location : locations) {
+      addLocation(location);
+    }
+  }
+
+  /**
+   * Add an entry to the consistent hash ring, using a default of 100
+   * virtual nodes per location.
+   *
+   * @param location Node to add to the ring.
+   */
+  public void addLocation(String location) {
+    addLocation(location, 100);
+  }
+
+  /**
+   * Add entry to consistent hash ring.
+   *
+   * @param location Node to add to the ring.
+   * @param numVirtualNodes Number of virtual nodes to add.
+   */
+  public void addLocation(String location, int numVirtualNodes) {
+    writeLock.lock();
+    try {
+      entryToVirtualNodes.put(location, numVirtualNodes);
+      for (int i = 0; i < numVirtualNodes; i++) {
+        String key = String.format(VIRTUAL_NODE_FORMAT, location, i);
+        String hash = getHash(key);
+        ring.put(hash, key);
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Remove specified entry from hash ring.
+   *
+   * @param location Node to remove from the ring.
+   */
+  public void removeLocation(String location) {
+    writeLock.lock();
+    try {
+      Integer numVirtualNodes = entryToVirtualNodes.remove(location);
+      if (numVirtualNodes == null) {
+        // The location was never added; nothing to remove from the ring.
+        return;
+      }
+      for (int i = 0; i < numVirtualNodes; i++) {
+        String key = String.format(VIRTUAL_NODE_FORMAT, location, i);
+        String hash = getHash(key);
+        ring.remove(hash);
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Return the location (owner) of the specified item. The owner is the
+   * next entry on the hash ring: the first entry whose hash is greater than
+   * the hash of the item, wrapping around to the first entry if needed.
+   * @param item Item to look for.
+   * @return The location of the item, or null if the ring is empty.
+   */
+  public String getLocation(String item) {
+    readLock.lock();
+    try {
+      if (ring.isEmpty()) {
+        return null;
+      }
+      String hash = getHash(item);
+      if (!ring.containsKey(hash)) {
+        SortedMap<String, String> tailMap = ring.tailMap(hash);
+        hash = tailMap.isEmpty() ? ring.firstKey() : tailMap.firstKey();
+      }
+      String virtualNode = ring.get(hash);
+      int index = virtualNode.lastIndexOf(SEPERATOR);
+      if (index >= 0) {
+        return virtualNode.substring(0, index);
+      } else {
+        return virtualNode;
+      }
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  public String getHash(String key) {
+    return MD5Hash.digest(key).toString();
+  }
+
+  /**
+   * Get the locations in the ring.
+   * @return Set of locations in the ring.
+   */
+  public Set<String> getLocations() {
+    return entryToVirtualNodes.keySet();
+  }
+}
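
A minimal sketch of the ring's behavior; the subcluster names and paths are
hypothetical:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    import org.apache.hadoop.hdfs.server.federation.utils.ConsistentHashRing;

    public class HashRingExample {
      public static void main(String[] args) {
        Set<String> subclusters =
            new HashSet<>(Arrays.asList("ns0", "ns1", "ns2"));
        ConsistentHashRing ring = new ConsistentHashRing(subclusters);

        // Each item maps to the first virtual node at or after its MD5 hash.
        String owner = ring.getLocation("/data/file-17");
        System.out.println("/data/file-17 -> " + owner);

        // Removing a location only remaps items that hashed to its virtual
        // nodes; items owned by the other locations keep their owners.
        ring.removeLocation("ns1");
        System.out.println("/data/file-17 -> "
            + ring.getLocation("/data/file-17"));
      }
    }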

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/package-info.java
new file mode 100644
index 0000000..7149675
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/package-info.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Assorted utility classes and helpers for HDFS Federation.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+
+package org.apache.hadoop.hdfs.server.federation.utils;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
new file mode 100644
index 0000000..f4adbad
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -0,0 +1,627 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.federation;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import org.apache.hadoop.hdfs.server.federation.router.RouterClient;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.router.RouterQuotaUsage;
+import org.apache.hadoop.hdfs.server.federation.router.RouterStateManager;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class provides some Federation administrative access shell commands.
+ */
+@Private
+public class RouterAdmin extends Configured implements Tool {
+
+  private static final Logger LOG = LoggerFactory.getLogger(RouterAdmin.class);
+
+  private RouterClient client;
+
+  public static void main(String[] argv) throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    RouterAdmin admin = new RouterAdmin(conf);
+
+    int res = ToolRunner.run(admin, argv);
+    System.exit(res);
+  }
+
+  public RouterAdmin(Configuration conf) {
+    super(conf);
+  }
+
+  /**
+   * Print the usage message.
+   */
+  public void printUsage() {
+    String usage = "Federation Admin Tools:\n"
+        + "\t[-add <source> <nameservice> <destination> "
+        + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
+        + "-owner <owner> -group <group> -mode <mode>]\n"
+        + "\t[-rm <source>]\n"
+        + "\t[-ls <path>]\n"
+        + "\t[-setQuota <path> -nsQuota <nsQuota> -ssQuota "
+        + "<quota in bytes or quota size string>]\n"
+        + "\t[-clrQuota <path>]\n"
+        + "\t[-safemode enter | leave | get]\n";
+
+    System.out.println(usage);
+  }
+
+  @Override
+  public int run(String[] argv) throws Exception {
+    if (argv.length < 1) {
+      System.err.println("Not enough parameters specificed");
+      printUsage();
+      return -1;
+    }
+
+    int exitCode = -1;
+    int i = 0;
+    String cmd = argv[i++];
+
+    // Verify that we have enough command line parameters
+    if ("-add".equals(cmd)) {
+      if (argv.length < 4) {
+        System.err.println("Not enough parameters specificed for cmd " + cmd);
+        printUsage();
+        return exitCode;
+      }
+    } else if ("-rm".equalsIgnoreCase(cmd)) {
+      if (argv.length < 2) {
+        System.err.println("Not enough parameters specificed for cmd " + cmd);
+        printUsage();
+        return exitCode;
+      }
+    } else if ("-setQuota".equalsIgnoreCase(cmd)) {
+      if (argv.length < 4) {
+        System.err.println("Not enough parameters specificed for cmd " + cmd);
+        printUsage();
+        return exitCode;
+      }
+    } else if ("-clrQuota".equalsIgnoreCase(cmd)) {
+      if (argv.length < 2) {
+        System.err.println("Not enough parameters specificed for cmd " + cmd);
+        printUsage();
+        return exitCode;
+      }
+    } else if ("-safemode".equalsIgnoreCase(cmd)) {
+      if (argv.length < 2) {
+        System.err.println("Not enough parameters specificed for cmd " + cmd);
+        printUsage();
+        return exitCode;
+      }
+    }
+
+    // Initialize RouterClient
+    try {
+      String address = getConf().getTrimmed(
+          RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
+          RBFConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT);
+      InetSocketAddress routerSocket = NetUtils.createSocketAddr(address);
+      client = new RouterClient(routerSocket, getConf());
+    } catch (RPC.VersionMismatch v) {
+      System.err.println(
+          "Version mismatch between client and server... command aborted");
+      return exitCode;
+    } catch (IOException e) {
+      System.err.println("Bad connection to Router... command aborted");
+      return exitCode;
+    }
+
+    Exception debugException = null;
+    exitCode = 0;
+    try {
+      if ("-add".equals(cmd)) {
+        if (addMount(argv, i)) {
+          System.out.println("Successfuly added mount point " + argv[i]);
+        }
+      } else if ("-rm".equals(cmd)) {
+        if (removeMount(argv[i])) {
+          System.out.println("Successfully removed mount point " + argv[i]);
+        }
+      } else if ("-ls".equals(cmd)) {
+        if (argv.length > 1) {
+          listMounts(argv[i]);
+        } else {
+          listMounts("/");
+        }
+      } else if ("-setQuota".equals(cmd)) {
+        if (setQuota(argv, i)) {
+          System.out.println(
+              "Successfully set quota for mount point " + argv[i]);
+        }
+      } else if ("-clrQuota".equals(cmd)) {
+        if (clrQuota(argv[i])) {
+          System.out.println(
+              "Successfully clear quota for mount point " + argv[i]);
+        }
+      } else if ("-safemode".equals(cmd)) {
+        manageSafeMode(argv[i]);
+      } else {
+        printUsage();
+        return exitCode;
+      }
+    } catch (IllegalArgumentException arge) {
+      debugException = arge;
+      exitCode = -1;
+      System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
+      printUsage();
+    } catch (RemoteException e) {
+      // This is an error returned by the server.
+      // Print the first line of the error message and the stack trace.
+      exitCode = -1;
+      debugException = e;
+      try {
+        String[] content = e.getLocalizedMessage().split("\n");
+        System.err.println(cmd.substring(1) + ": " + content[0]);
+        e.printStackTrace();
+      } catch (Exception ex) {
+        System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage());
+        e.printStackTrace();
+        debugException = ex;
+      }
+    } catch (Exception e) {
+      exitCode = -1;
+      debugException = e;
+      System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage());
+      e.printStackTrace();
+    }
+    if (debugException != null) {
+      LOG.debug("Exception encountered", debugException);
+    }
+    return exitCode;
+  }
+
+  /**
+   * Add a mount table entry or update if it exists.
+   *
+   * @param parameters Parameters for the mount point.
+   * @param i Index in the parameters.
+   */
+  public boolean addMount(String[] parameters, int i) throws IOException {
+    // Mandatory parameters
+    String mount = parameters[i++];
+    String[] nss = parameters[i++].split(",");
+    String dest = parameters[i++];
+
+    // Optional parameters
+    boolean readOnly = false;
+    String owner = null;
+    String group = null;
+    FsPermission mode = null;
+    DestinationOrder order = DestinationOrder.HASH;
+    while (i < parameters.length) {
+      if (parameters[i].equals("-readonly")) {
+        readOnly = true;
+      } else if (parameters[i].equals("-order")) {
+        i++;
+        try {
+          order = DestinationOrder.valueOf(parameters[i]);
+        } catch (Exception e) {
+          System.err.println("Cannot parse order: " + parameters[i]);
+        }
+      } else if (parameters[i].equals("-owner")) {
+        i++;
+        owner = parameters[i];
+      } else if (parameters[i].equals("-group")) {
+        i++;
+        group = parameters[i];
+      } else if (parameters[i].equals("-mode")) {
+        i++;
+        short modeValue = Short.parseShort(parameters[i], 8);
+        mode = new FsPermission(modeValue);
+      }
+
+      i++;
+    }
+
+    return addMount(mount, nss, dest, readOnly, order,
+        new ACLEntity(owner, group, mode));
+  }
+
+  /**
+   * Add a mount table entry or update if it exists.
+   *
+   * @param mount Mount point.
+   * @param nss Namespaces where this is mounted to.
+   * @param dest Destination path.
+   * @param readonly If the mount point is read only.
+   * @param order Order of the destination locations.
+   * @param aclInfo the ACL info for mount point.
+   * @return If the mount point was added.
+   * @throws IOException Error adding the mount point.
+   */
+  public boolean addMount(String mount, String[] nss, String dest,
+      boolean readonly, DestinationOrder order, ACLEntity aclInfo)
+      throws IOException {
+    // Get the existing entry
+    MountTableManager mountTable = client.getMountTableManager();
+    GetMountTableEntriesRequest getRequest =
+        GetMountTableEntriesRequest.newInstance(mount);
+    GetMountTableEntriesResponse getResponse =
+        mountTable.getMountTableEntries(getRequest);
+    List<MountTable> results = getResponse.getEntries();
+    MountTable existingEntry = null;
+    for (MountTable result : results) {
+      if (mount.equals(result.getSourcePath())) {
+        existingEntry = result;
+      }
+    }
+
+    if (existingEntry == null) {
+      // Create and add the entry if it doesn't exist
+      Map<String, String> destMap = new LinkedHashMap<>();
+      for (String ns : nss) {
+        destMap.put(ns, dest);
+      }
+      MountTable newEntry = MountTable.newInstance(mount, destMap);
+      if (readonly) {
+        newEntry.setReadOnly(true);
+      }
+      if (order != null) {
+        newEntry.setDestOrder(order);
+      }
+
+      // Set ACL info for mount table entry
+      if (aclInfo.getOwner() != null) {
+        newEntry.setOwnerName(aclInfo.getOwner());
+      }
+
+      if (aclInfo.getGroup() != null) {
+        newEntry.setGroupName(aclInfo.getGroup());
+      }
+
+      if (aclInfo.getMode() != null) {
+        newEntry.setMode(aclInfo.getMode());
+      }
+
+      AddMountTableEntryRequest request =
+          AddMountTableEntryRequest.newInstance(newEntry);
+      AddMountTableEntryResponse addResponse =
+          mountTable.addMountTableEntry(request);
+      boolean added = addResponse.getStatus();
+      if (!added) {
+        System.err.println("Cannot add mount point " + mount);
+      }
+      return added;
+    } else {
+      // Update the existing entry if it exists
+      for (String nsId : nss) {
+        if (!existingEntry.addDestination(nsId, dest)) {
+          System.err.println("Cannot add destination at " + nsId + " " + dest);
+        }
+      }
+      if (readonly) {
+        existingEntry.setReadOnly(true);
+      }
+      if (order != null) {
+        existingEntry.setDestOrder(order);
+      }
+
+      // Update ACL info of mount table entry
+      if (aclInfo.getOwner() != null) {
+        existingEntry.setOwnerName(aclInfo.getOwner());
+      }
+
+      if (aclInfo.getGroup() != null) {
+        existingEntry.setGroupName(aclInfo.getGroup());
+      }
+
+      if (aclInfo.getMode() != null) {
+        existingEntry.setMode(aclInfo.getMode());
+      }
+
+      UpdateMountTableEntryRequest updateRequest =
+          UpdateMountTableEntryRequest.newInstance(existingEntry);
+      UpdateMountTableEntryResponse updateResponse =
+          mountTable.updateMountTableEntry(updateRequest);
+      boolean updated = updateResponse.getStatus();
+      if (!updated) {
+        System.err.println("Cannot update mount point " + mount);
+      }
+      return updated;
+    }
+  }
+
+  /**
+   * Remove mount point.
+   *
+   * @param path Path to remove.
+   * @throws IOException If it cannot be removed.
+   */
+  public boolean removeMount(String path) throws IOException {
+    MountTableManager mountTable = client.getMountTableManager();
+    RemoveMountTableEntryRequest request =
+        RemoveMountTableEntryRequest.newInstance(path);
+    RemoveMountTableEntryResponse response =
+        mountTable.removeMountTableEntry(request);
+    boolean removed = response.getStatus();
+    if (!removed) {
+      System.out.println("Cannot remove mount point " + path);
+    }
+    return removed;
+  }
+
+  /**
+   * List mount points.
+   *
+   * @param path Path to list.
+   * @throws IOException If it cannot be listed.
+   */
+  public void listMounts(String path) throws IOException {
+    MountTableManager mountTable = client.getMountTableManager();
+    GetMountTableEntriesRequest request =
+        GetMountTableEntriesRequest.newInstance(path);
+    GetMountTableEntriesResponse response =
+        mountTable.getMountTableEntries(request);
+    List<MountTable> entries = response.getEntries();
+    printMounts(entries);
+  }
+
+  private static void printMounts(List<MountTable> entries) {
+    System.out.println("Mount Table Entries:");
+    System.out.println(String.format(
+        "%-25s %-25s %-25s %-25s %-25s %-25s",
+        "Source", "Destinations", "Owner", "Group", "Mode", "Quota/Usage"));
+    for (MountTable entry : entries) {
+      StringBuilder destBuilder = new StringBuilder();
+      for (RemoteLocation location : entry.getDestinations()) {
+        if (destBuilder.length() > 0) {
+          destBuilder.append(",");
+        }
+        destBuilder.append(String.format("%s->%s", location.getNameserviceId(),
+            location.getDest()));
+      }
+      System.out.print(String.format("%-25s %-25s", entry.getSourcePath(),
+          destBuilder.toString()));
+
+      System.out.print(String.format(" %-25s %-25s %-25s",
+          entry.getOwnerName(), entry.getGroupName(), entry.getMode()));
+
+      System.out.println(String.format(" %-25s", entry.getQuota()));
+    }
+  }
+
+  /**
+   * Set quota for a mount table entry.
+   *
+   * @param parameters Parameters of the quota.
+   * @param i Index in the parameters.
+   */
+  private boolean setQuota(String[] parameters, int i) throws IOException {
+    long nsQuota = HdfsConstants.QUOTA_DONT_SET;
+    long ssQuota = HdfsConstants.QUOTA_DONT_SET;
+
+    String mount = parameters[i++];
+    while (i < parameters.length) {
+      if (parameters[i].equals("-nsQuota")) {
+        i++;
+        try {
+          nsQuota = Long.parseLong(parameters[i]);
+        } catch (Exception e) {
+          System.err.println("Cannot parse nsQuota: " + parameters[i]);
+        }
+      } else if (parameters[i].equals("-ssQuota")) {
+        i++;
+        try {
+          ssQuota = StringUtils.TraditionalBinaryPrefix
+              .string2long(parameters[i]);
+        } catch (Exception e) {
+          System.err.println("Cannot parse ssQuota: " + parameters[i]);
+        }
+      }
+
+      i++;
+    }
+
+    if (nsQuota <= 0 || ssQuota <= 0) {
+      System.err.println("Input quota value should be a positive number.");
+      return false;
+    }
+
+    return updateQuota(mount, nsQuota, ssQuota);
+  }
+
+  /**
+   * Clear quota of the mount point.
+   *
+   * @param mount Mount table to clear
+   * @return If the quota was cleared.
+   * @throws IOException Error clearing the mount point.
+   */
+  private boolean clrQuota(String mount) throws IOException {
+    return updateQuota(mount, HdfsConstants.QUOTA_DONT_SET,
+        HdfsConstants.QUOTA_DONT_SET);
+  }
+
+  /**
+   * Update quota of specified mount table.
+   *
+   * @param mount Specified mount table to update.
+   * @param nsQuota Namespace quota.
+   * @param ssQuota Storage space quota.
+   * @return If the quota was updated.
+   * @throws IOException Error updating quota.
+   */
+  private boolean updateQuota(String mount, long nsQuota, long ssQuota)
+      throws IOException {
+    // Get existing entry
+    MountTableManager mountTable = client.getMountTableManager();
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(mount);
+    GetMountTableEntriesResponse getResponse = mountTable
+        .getMountTableEntries(getRequest);
+    List<MountTable> results = getResponse.getEntries();
+    MountTable existingEntry = null;
+    for (MountTable result : results) {
+      if (mount.equals(result.getSourcePath())) {
+        existingEntry = result;
+        break;
+      }
+    }
+
+    if (existingEntry == null) {
+      return false;
+    } else {
+      long nsCount = existingEntry.getQuota().getFileAndDirectoryCount();
+      long ssCount = existingEntry.getQuota().getSpaceConsumed();
+      // If nsQuota or ssQuota was unset, reset corresponding usage
+      // value to zero.
+      if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
+        nsCount = RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT;
+      }
+
+      if (ssQuota == HdfsConstants.QUOTA_DONT_SET) {
+        ssCount = RouterQuotaUsage.QUOTA_USAGE_COUNT_DEFAULT;
+      }
+
+      RouterQuotaUsage updatedQuota = new RouterQuotaUsage.Builder()
+          .fileAndDirectoryCount(nsCount).quota(nsQuota)
+          .spaceConsumed(ssCount).spaceQuota(ssQuota).build();
+      existingEntry.setQuota(updatedQuota);
+    }
+
+    UpdateMountTableEntryRequest updateRequest =
+        UpdateMountTableEntryRequest.newInstance(existingEntry);
+    UpdateMountTableEntryResponse updateResponse = mountTable
+        .updateMountTableEntry(updateRequest);
+    return updateResponse.getStatus();
+  }
+
+  /**
+   * Manage the safe mode state.
+   * @param cmd Input command: enter, leave or get the safe mode state.
+   * @throws IOException If the state cannot be managed.
+   */
+  private void manageSafeMode(String cmd) throws IOException {
+    if (cmd.equals("enter")) {
+      if (enterSafeMode()) {
+        System.out.println("Successfully enter safe mode.");
+      }
+    } else if (cmd.equals("leave")) {
+      if (leaveSafeMode()) {
+        System.out.println("Successfully leave safe mode.");
+      }
+    } else if (cmd.equals("get")) {
+      boolean result = getSafeMode();
+      System.out.println("Safe Mode: " + result);
+    }
+  }
+
+  /**
+   * Request the Router to enter safe mode.
+   * @return True if the Router entered safe mode successfully.
+   * @throws IOException If the request fails.
+   */
+  private boolean enterSafeMode() throws IOException {
+    RouterStateManager stateManager = client.getRouterStateManager();
+    EnterSafeModeResponse response = stateManager.enterSafeMode(
+        EnterSafeModeRequest.newInstance());
+    return response.getStatus();
+  }
+
+  /**
+   * Request the Router to leave safe mode.
+   * @return True if the Router left safe mode successfully.
+   * @throws IOException If the request fails.
+   */
+  private boolean leaveSafeMode() throws IOException {
+    RouterStateManager stateManager = client.getRouterStateManager();
+    LeaveSafeModeResponse response = stateManager.leaveSafeMode(
+        LeaveSafeModeRequest.newInstance());
+    return response.getStatus();
+  }
+
+  /**
+   * Check whether the current Router state is safe mode.
+   * @return True if the Router is in safe mode.
+   * @throws IOException If the request fails.
+   */
+  private boolean getSafeMode() throws IOException {
+    RouterStateManager stateManager = client.getRouterStateManager();
+    GetSafeModeResponse response = stateManager.getSafeMode(
+        GetSafeModeRequest.newInstance());
+    return response.isInSafeMode();
+  }
+
+  /**
+   * Inner class that stores the ACL information of a mount table entry.
+   */
+  static class ACLEntity {
+    private final String owner;
+    private final String group;
+    private final FsPermission mode;
+
+    ACLEntity(String owner, String group, FsPermission mode) {
+      this.owner = owner;
+      this.group = group;
+      this.mode = mode;
+    }
+
+    public String getOwner() {
+      return owner;
+    }
+
+    public String getGroup() {
+      return group;
+    }
+
+    public FsPermission getMode() {
+      return mode;
+    }
+  }
+}
\ No newline at end of file
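
A note on the quota parsing above: the -ssQuota argument goes through
Hadoop's StringUtils.TraditionalBinaryPrefix helper, so it accepts binary
suffixes such as k, m, g and t in addition to plain byte counts. A minimal
sketch of that behavior (sample values are illustrative only):

    import org.apache.hadoop.util.StringUtils;

    public class QuotaParseSketch {
      public static void main(String[] args) {
        // "10g" parses to 10 * 2^30 = 10737418240 bytes.
        long tenGig = StringUtils.TraditionalBinaryPrefix.string2long("10g");
        // Plain numbers pass through unchanged.
        long plain = StringUtils.TraditionalBinaryPrefix.string2long("1048576");
        System.out.println(tenGig + " " + plain);
      }
    }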

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/package-info.java
new file mode 100644
index 0000000..466c3d3
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/package-info.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tools to manage the Router-based federation, including utilities to add
+ * and remove mount table entries.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+package org.apache.hadoop.hdfs.tools.federation;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto
new file mode 100644
index 0000000..b0d6982
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/FederationProtocol.proto
@@ -0,0 +1,244 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hdfs.federation.protocol.proto";
+option java_outer_classname = "HdfsServerFederationProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+import "hdfs.proto";
+
+/////////////////////////////////////////////////
+// Membership
+/////////////////////////////////////////////////
+
+message NamenodeMembershipStatsRecordProto {
+  optional uint64 totalSpace = 1;
+  optional uint64 availableSpace = 2;
+  optional uint64 providedSpace = 3;
+
+  optional uint64 numOfFiles = 10;
+  optional uint64 numOfBlocks = 11;
+  optional uint64 numOfBlocksMissing = 12;
+  optional uint64 numOfBlocksPendingReplication = 13;
+  optional uint64 numOfBlocksUnderReplicated = 14;
+  optional uint64 numOfBlocksPendingDeletion = 15;
+
+  optional uint32 numOfActiveDatanodes = 20;
+  optional uint32 numOfDeadDatanodes = 21;
+  optional uint32 numOfDecommissioningDatanodes = 22;
+  optional uint32 numOfDecomActiveDatanodes = 23;
+  optional uint32 numOfDecomDeadDatanodes = 24;
+}
+
+message NamenodeMembershipRecordProto {
+  optional uint64 dateCreated = 1;
+  optional uint64 dateModified = 2;
+  optional uint64 lastContact = 3;
+  optional string routerId = 4;
+  optional string nameserviceId = 5;
+  optional string namenodeId = 6;
+  optional string clusterId = 7;
+  optional string blockPoolId = 8;
+  optional string webAddress = 9;
+  optional string rpcAddress = 10;
+  optional string serviceAddress = 11;
+  optional string lifelineAddress = 12;
+  optional string state = 13;
+  optional bool isSafeMode = 14;
+
+  optional NamenodeMembershipStatsRecordProto stats = 15;
+}
+
+message FederationNamespaceInfoProto {
+  optional string blockPoolId = 1;
+  optional string clusterId = 2;
+  optional string nameserviceId = 3;
+}
+
+message GetNamenodeRegistrationsRequestProto {
+  optional NamenodeMembershipRecordProto membership = 1;
+}
+
+message GetNamenodeRegistrationsResponseProto {
+  repeated NamenodeMembershipRecordProto namenodeMemberships = 1;
+}
+
+message GetExpiredRegistrationsRequestProto {
+}
+
+message GetNamespaceInfoRequestProto {
+}
+
+message GetNamespaceInfoResponseProto {
+  repeated FederationNamespaceInfoProto namespaceInfos = 1;
+}
+
+message UpdateNamenodeRegistrationRequestProto {
+  optional string nameserviceId = 1;
+  optional string namenodeId = 2;
+  optional string state = 3;
+}
+
+message UpdateNamenodeRegistrationResponseProto {
+  optional bool status = 1;
+}
+
+message NamenodeHeartbeatRequestProto {
+  optional NamenodeMembershipRecordProto namenodeMembership = 1;
+}
+
+message NamenodeHeartbeatResponseProto {
+  optional bool status = 1;
+}
+
+
+/////////////////////////////////////////////////
+// Mount table
+/////////////////////////////////////////////////
+
+message RemoteLocationProto {
+  optional string nameserviceId = 1;
+  optional string path = 2;
+}
+
+message MountTableRecordProto {
+  optional string srcPath = 1;
+  repeated RemoteLocationProto destinations = 2;
+  optional uint64 dateCreated = 3;
+  optional uint64 dateModified = 4;
+  optional bool readOnly = 5 [default = false];
+
+  enum DestOrder {
+    HASH = 0;
+    LOCAL = 1;
+    RANDOM = 2;
+    HASH_ALL = 3;
+  }
+  optional DestOrder destOrder = 6 [default = HASH];
+
+  optional string ownerName = 10;
+  optional string groupName = 11;
+  optional int32 mode = 12;
+
+  optional QuotaUsageProto quota = 13;
+}
+
+message AddMountTableEntryRequestProto {
+  optional MountTableRecordProto entry = 1;
+}
+
+message AddMountTableEntryResponseProto {
+  optional bool status = 1;
+}
+
+message UpdateMountTableEntryRequestProto {
+  optional MountTableRecordProto entry = 1;
+}
+
+message UpdateMountTableEntryResponseProto {
+  optional bool status = 1;
+}
+
+message RemoveMountTableEntryRequestProto {
+  optional string srcPath = 1;
+}
+
+message RemoveMountTableEntryResponseProto {
+  optional bool status = 1;
+}
+
+message GetMountTableEntriesRequestProto {
+  optional string srcPath = 1;
+}
+
+message GetMountTableEntriesResponseProto {
+  repeated MountTableRecordProto entries = 1;
+  optional uint64 timestamp = 2;
+}
+
+
+/////////////////////////////////////////////////
+// Routers
+/////////////////////////////////////////////////
+
+message StateStoreVersionRecordProto {
+  optional uint64 membershipVersion = 1;
+  optional uint64 mountTableVersion = 2;
+}
+
+message RouterRecordProto {
+  optional uint64 dateCreated = 1;
+  optional uint64 dateModified = 2;
+  optional string address = 3;
+  optional string status = 4;
+  optional StateStoreVersionRecordProto stateStoreVersion = 5;
+  optional string version = 6;
+  optional string compileInfo = 7;
+  optional uint64 dateStarted = 8;
+}
+
+message GetRouterRegistrationRequestProto {
+  optional string routerId = 1;
+}
+
+message GetRouterRegistrationResponseProto {
+  optional RouterRecordProto router = 1;
+}
+
+message GetRouterRegistrationsRequestProto {
+}
+
+message GetRouterRegistrationsResponseProto {
+  optional uint64 timestamp = 1;
+  repeated RouterRecordProto routers = 2;
+}
+
+message RouterHeartbeatRequestProto {
+  optional RouterRecordProto router = 1;
+}
+
+message RouterHeartbeatResponseProto {
+  optional bool status = 1;
+}
+
+/////////////////////////////////////////////////
+// Router State
+/////////////////////////////////////////////////
+
+message EnterSafeModeRequestProto {
+}
+
+message EnterSafeModeResponseProto {
+  optional bool status = 1;
+}
+
+message LeaveSafeModeRequestProto {
+}
+
+message LeaveSafeModeResponseProto {
+  optional bool status = 1;
+}
+
+message GetSafeModeRequestProto {
+}
+
+message GetSafeModeResponseProto {
+  optional bool isInSafeMode = 1;
+}
\ No newline at end of file
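
For reference, the messages above compile into builder-style Java classes
under the HdfsServerFederationProtos outer class named in the options. A
hedged sketch of assembling a mount table record with the generated API
(the paths and nameservice id are made-up examples):

    import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto;
    import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.MountTableRecordProto.DestOrder;
    import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoteLocationProto;

    public class MountTableProtoSketch {
      public static void main(String[] args) {
        // One destination in subcluster "ns0" (illustrative values).
        RemoteLocationProto dest = RemoteLocationProto.newBuilder()
            .setNameserviceId("ns0")
            .setPath("/backing/data")
            .build();
        // Mount /data onto that destination with the default HASH ordering.
        MountTableRecordProto record = MountTableRecordProto.newBuilder()
            .setSrcPath("/data")
            .addDestinations(dest)
            .setDestOrder(DestOrder.HASH)
            .setReadOnly(false)
            .build();
        System.out.println(record);
      }
    }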

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto
new file mode 100644
index 0000000..a4e4d65
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/proto/RouterProtocol.proto
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "RouterProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs.router;
+
+import "FederationProtocol.proto";
+
+service RouterAdminProtocolService {
+  /**
+   * Add a mount table entry.
+   */
+  rpc addMountTableEntry(AddMountTableEntryRequestProto) returns (AddMountTableEntryResponseProto);
+
+  /**
+   * Update an existing mount table entry without copying files.
+   */
+  rpc updateMountTableEntry(UpdateMountTableEntryRequestProto) returns (UpdateMountTableEntryResponseProto);
+
+  /**
+   * Remove a mount table entry.
+   */
+  rpc removeMountTableEntry(RemoveMountTableEntryRequestProto) returns (RemoveMountTableEntryResponseProto);
+
+  /**
+   * Get matching mount table entries.
+   */
+  rpc getMountTableEntries(GetMountTableEntriesRequestProto) returns (GetMountTableEntriesResponseProto);
+
+  /**
+   * Transition the Router state to safe mode.
+   */
+  rpc enterSafeMode(EnterSafeModeRequestProto) returns (EnterSafeModeResponseProto);
+
+  /**
+   * Transition the Router state from safe mode to running state.
+   */
+  rpc leaveSafeMode(LeaveSafeModeRequestProto) returns (LeaveSafeModeResponseProto);
+
+  /**
+   * Verify whether the current Router state is safe mode.
+   */
+  rpc getSafeMode(GetSafeModeRequestProto) returns (GetSafeModeResponseProto);
+}
\ No newline at end of file
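
Because java_generic_services is enabled, protoc also emits a blocking stub
interface for RouterAdminProtocolService. A rough sketch of listing mount
table entries through such a stub; how the stub is bound to the Router's
admin address is elided here (Hadoop wires it up through its RPC engine),
and passing a null RpcController is a simplification:

    import com.google.protobuf.ServiceException;
    import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto;
    import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto;
    import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;

    public class ListMountsSketch {
      // Fetch every mount entry under the root path.
      static GetMountTableEntriesResponseProto listAll(
          RouterAdminProtocolService.BlockingInterface stub)
          throws ServiceException {
        GetMountTableEntriesRequestProto request =
            GetMountTableEntriesRequestProto.newBuilder().setSrcPath("/").build();
        return stub.getMountTableEntries(null, request);
      }
    }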

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
new file mode 100644
index 0000000..92f899d
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/resources/hdfs-rbf-default.xml
@@ -0,0 +1,434 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Do not modify this file directly.  Instead, copy entries that you -->
+<!-- wish to modify from this file into hdfs-site.xml and change them -->
+<!-- there.  If hdfs-site.xml does not already exist, create it.      -->
+
+<configuration>
+  <property>
+    <name>dfs.federation.router.default.nameserviceId</name>
+    <value></value>
+    <description>
+      Nameservice identifier of the default subcluster to monitor.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.rpc.enable</name>
+    <value>true</value>
+    <description>
+      If true, the RPC service to handle client requests in the router is
+      enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.rpc-address</name>
+    <value>0.0.0.0:8888</value>
+    <description>
+      RPC address that handles all client requests.
+      The value of this property will take the form of router-host1:rpc-port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.rpc-bind-host</name>
+    <value></value>
+    <description>
+      The actual address the RPC server will bind to. If this optional
+      address is set, it overrides only the hostname portion of
+      dfs.federation.router.rpc-address. This is useful for making the
+      Router listen on all interfaces by setting it to 0.0.0.0.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.handler.count</name>
+    <value>10</value>
+    <description>
+      The number of server threads for the router to handle RPC requests from
+      clients.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.handler.queue.size</name>
+    <value>100</value>
+    <description>
+      The size of the queue used by the handlers to process RPC client
+      requests.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.reader.count</name>
+    <value>1</value>
+    <description>
+      The number of readers for the router to handle RPC client requests.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.reader.queue.size</name>
+    <value>100</value>
+    <description>
+      The size of the queue used by the readers for the router to handle
+      RPC client requests.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.connection.pool-size</name>
+    <value>1</value>
+    <description>
+      Size of the pool of connections from the router to namenodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.connection.clean.ms</name>
+    <value>10000</value>
+    <description>
+      Time interval, in milliseconds, to check if the connection pool should
+      remove unused connections.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.connection.pool.clean.ms</name>
+    <value>60000</value>
+    <description>
+      Time interval, in milliseconds, to check if the connection manager should
+      remove unused connection pools.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.metrics.enable</name>
+    <value>true</value>
+    <description>
+      If true, the metrics in the router are enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.metrics.class</name>
+    <value>org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor</value>
+    <description>
+      Class to monitor the RPC system in the router. It must implement the
+      RouterRpcMonitor interface.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.admin.enable</name>
+    <value>true</value>
+    <description>
+      If true, the RPC admin service to handle client requests in the router is
+      enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.admin-address</name>
+    <value>0.0.0.0:8111</value>
+    <description>
+      RPC address that handles the admin requests.
+      The value of this property will take the form of router-host1:rpc-port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.admin-bind-host</name>
+    <value></value>
+    <description>
+      The actual address the RPC admin server will bind to. If this optional
+      address is set, it overrides only the hostname portion of
+      dfs.federation.router.admin-address. This is useful for making the
+      Router listen on all interfaces by setting it to 0.0.0.0.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.admin.handler.count</name>
+    <value>1</value>
+    <description>
+      The number of server threads for the router to handle RPC requests from
+      admin.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.http-address</name>
+    <value>0.0.0.0:50071</value>
+    <description>
+      HTTP address that handles the web requests to the Router.
+      The value of this property will take the form of router-host1:http-port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.http-bind-host</name>
+    <value></value>
+    <description>
+      The actual address the HTTP server will bind to. If this optional
+      address is set, it overrides only the hostname portion of
+      dfs.federation.router.http-address. This is useful for making the
+      Router listen on all interfaces by setting it to 0.0.0.0.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.https-address</name>
+    <value>0.0.0.0:50072</value>
+    <description>
+      HTTPS address that handles the web requests to the Router.
+      The value of this property will take the form of router-host1:https-port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.https-bind-host</name>
+    <value></value>
+    <description>
+      The actual address the HTTPS server will bind to. If this optional
+      address is set, it overrides only the hostname portion of
+      dfs.federation.router.https-address. This is useful for making the
+      Router listen on all interfaces by setting it to 0.0.0.0.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.http.enable</name>
+    <value>true</value>
+    <description>
+      If true, the HTTP service to handle client requests in the router is
+      enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.file.resolver.client.class</name>
+    <value>org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver</value>
+    <description>
+      Class to resolve files to subclusters.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.namenode.resolver.client.class</name>
+    <value>org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver</value>
+    <description>
+      Class to resolve the namenode for a subcluster.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.store.enable</name>
+    <value>true</value>
+    <description>
+      If true, the Router connects to the State Store.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.store.serializer</name>
+    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl</value>
+    <description>
+      Class to serialize State Store records.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.store.driver.class</name>
+    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl</value>
+    <description>
+      Class to implement the State Store. Three implementation classes are
+      currently supported:
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl,
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileSystemImpl
+      and
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl,
+      which use a local file, a Hadoop filesystem and ZooKeeper as the
+      backend respectively. ZooKeeper is the default State Store.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.store.connection.test</name>
+    <value>60000</value>
+    <description>
+      How often to check for the connection to the State Store in milliseconds.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.cache.ttl</name>
+    <value>1m</value>
+    <description>
+      How often to refresh the State Store caches in milliseconds. This setting
+      supports multiple time unit suffixes as described in
+      dfs.heartbeat.interval. If no suffix is specified then milliseconds is
+      assumed.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.store.membership.expiration</name>
+    <value>300000</value>
+    <description>
+      Expiration time in milliseconds for a membership record.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.heartbeat.enable</name>
+    <value>true</value>
+    <description>
+      If true, the Router heartbeats into the State Store.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.heartbeat.interval</name>
+    <value>5000</value>
+    <description>
+      How often the Router should heartbeat into the State Store, in
+      milliseconds.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.heartbeat-state.interval</name>
+    <value>5s</value>
+    <description>
+      How often the Router should heartbeat its state into the State Store in
+      milliseconds. This setting supports multiple time unit suffixes as
+      described in dfs.federation.router.quota-cache.update.interval.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.store.router.expiration</name>
+    <value>5m</value>
+    <description>
+      Expiration time in milliseconds for a router state record. This setting
+      supports multiple time unit suffixes as described in
+      dfs.federation.router.quota-cache.update.interval.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.safemode.enable</name>
+    <value>true</value>
+    <description>
+      If true, the Router enters safe mode at startup for the time given by
+      dfs.federation.router.safemode.extension.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.safemode.extension</name>
+    <value>30s</value>
+    <description>
+      Time after startup that the Router is in safe mode. This setting
+      supports multiple time unit suffixes as described in
+      dfs.heartbeat.interval. If no suffix is specified then milliseconds is
+      assumed.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.safemode.expiration</name>
+    <value>3m</value>
+    <description>
+      Time without being able to reach the State Store to enter safe mode. This
+      setting supports multiple time unit suffixes as described in
+      dfs.heartbeat.interval. If no suffix is specified then milliseconds is
+      assumed.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.monitor.namenode</name>
+    <value></value>
+    <description>
+      The identifiers of the namenodes to monitor and heartbeat to.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.monitor.localnamenode.enable</name>
+    <value>true</value>
+    <description>
+      If true, the Router should monitor the namenode in the local machine.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.mount-table.max-cache-size</name>
+    <value>10000</value>
+    <description>
+      Maximum number of mount table cache entries to keep. By default,
+      cache entries are removed when there are more than 10k of them.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.quota.enable</name>
+    <value>false</value>
+    <description>
+      Set to true to enable the quota system in the Router.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.quota-cache.update.interval</name>
+    <value>60s</value>
+    <description>
+      Interval for updating the quota usage cache in the Router.
+      This property is used only if the value of
+      dfs.federation.router.quota.enable is true.
+      This setting supports multiple time unit suffixes as described
+      in dfs.heartbeat.interval. If no suffix is specified then milliseconds
+      is assumed.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.client.thread-size</name>
+    <value>32</value>
+    <description>
+      Maximum number of threads for the RouterClient to execute concurrent
+      requests.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.client.retry.max.attempts</name>
+    <value>3</value>
+    <description>
+      Maximum number of retry attempts for the RouterClient talking to the
+      Router.
+    </description>
+  </property>
+
+</configuration>
\ No newline at end of file
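
As the header of this file says, overrides belong in hdfs-site.xml rather
than in this file. The same settings can also be applied programmatically
through Hadoop's Configuration API; a small sketch where the host name and
values are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class RouterConfSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Placeholder values; pick ones appropriate for the cluster.
        conf.set("dfs.federation.router.rpc-address", "router1.example.com:8888");
        conf.setBoolean("dfs.federation.router.quota.enable", true);
        // Time-based settings accept unit suffixes such as "1m".
        conf.set("dfs.federation.router.cache.ttl", "1m");
        System.out.println(conf.get("dfs.federation.router.rpc-address"));
      }
    }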

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87700d45/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/proto-web.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/proto-web.xml 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/proto-web.xml
new file mode 100644
index 0000000..83b36b2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/proto-web.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee";>
+</web-app>

