http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
deleted file mode 100644
index 49cf544..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-
-import org.apache.commons.pool2.impl.GenericObjectPool;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-/**
- * Manages the locks on a given resource. A new lock is created for each
- * and every unique resource. Uniqueness of resource depends on the
- * {@code equals} implementation of it.
- */
-public class LockManager<T> {
-
-  private static final Logger LOG = LoggerFactory.getLogger(LockManager.class);
-
-  private final Map<T, ActiveLock> activeLocks = new ConcurrentHashMap<>();
-  private final GenericObjectPool<ActiveLock> lockPool =
-      new GenericObjectPool<>(new PooledLockFactory());
-
-  /**
-   * Creates new LockManager instance.
-   *
-   * @param conf Configuration object
-   */
-  public LockManager(Configuration conf) {
-    int maxPoolSize = conf.getInt(HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY,
-        HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY_DEFAULT);
-    lockPool.setMaxTotal(maxPoolSize);
-  }
-
-
-  /**
-   * Acquires the lock on given resource.
-   *
-   * <p>If the lock is not available then the current thread becomes
-   * disabled for thread scheduling purposes and lies dormant until the
-   * lock has been acquired.
-   */
-  public void lock(T resource) {
-    activeLocks.compute(resource, (k, v) -> {
-      ActiveLock lock;
-      try {
-        if (v == null) {
-          lock = lockPool.borrowObject();
-        } else {
-          lock = v;
-        }
-        lock.incrementActiveCount();
-      } catch (Exception ex) {
-        LOG.error("Unable to obtain lock.", ex);
-        throw new RuntimeException(ex);
-      }
-      return lock;
-    }).lock();
-  }
-
-  /**
-   * Releases the lock on given resource.
-   */
-  public void unlock(T resource) {
-    ActiveLock lock = activeLocks.get(resource);
-    if (lock == null) {
-      // Someone is releasing a lock which was never acquired. Log and return.
-      LOG.warn("Trying to release the lock on {}, which was never acquired.",
-          resource);
-      return;
-    }
-    lock.unlock();
-    activeLocks.computeIfPresent(resource, (k, v) -> {
-      v.decrementActiveCount();
-      if (v.getActiveLockCount() != 0) {
-        return v;
-      }
-      lockPool.returnObject(v);
-      return null;
-    });
-  }
-
-}
\ No newline at end of file
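
For reference, a minimal usage sketch of the LockManager removed above. It is
illustrative only; the resource key and the plain Configuration instance are
placeholders, but the lock()/unlock() calls match the deleted API.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.lock.LockManager;

    public class LockManagerExample {
      public static void main(String[] args) {
        // One lock per unique resource key; uniqueness follows equals().
        LockManager<String> manager = new LockManager<>(new Configuration());
        manager.lock("volume1/bucket1");      // blocks until the lock is free
        try {
          // critical section guarded by the "volume1/bucket1" resource
        } finally {
          manager.unlock("volume1/bucket1");  // returns the lock to the pool
        }
      }
    }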

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java
deleted file mode 100644
index 4c24ef7..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-
-import org.apache.commons.pool2.BasePooledObjectFactory;
-import org.apache.commons.pool2.PooledObject;
-import org.apache.commons.pool2.impl.DefaultPooledObject;
-
-/**
- * Pool factory to create {@code ActiveLock} instances.
- */
-public class PooledLockFactory extends BasePooledObjectFactory<ActiveLock> {
-
-  @Override
-  public ActiveLock create() throws Exception {
-    return ActiveLock.newInstance();
-  }
-
-  @Override
-  public PooledObject<ActiveLock> wrap(ActiveLock activeLock) {
-    return new DefaultPooledObject<>(activeLock);
-  }
-
-  @Override
-  public void activateObject(PooledObject<ActiveLock> pooledObject) {
-    pooledObject.getObject().resetCounter();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java
deleted file mode 100644
index 5c677ce..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-/*
- This package contains the lock related classes.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java
deleted file mode 100644
index db399db..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-/**
- This package contains class that support ozone implementation on the datanode
- side.
-
- Main parts of ozone on datanode are:
-
- 1. REST Interface - This code lives under the web directory and listens to the
- WebHDFS port.
-
- 2. Datanode container classes: This support persistence of ozone objects on
- datanode. These classes live under container directory.
-
- 3. Client and Shell: We also support a ozone REST client lib, they are under
- web/client and web/ozShell.
-
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java
deleted file mode 100644
index 37a1309..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .AllocateScmBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .AllocateScmBlockResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .DeleteKeyBlocksResultProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .DeleteScmKeyBlocksRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .DeleteScmKeyBlocksResponseProto;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link StorageContainerLocationProtocolPB} to the
- * {@link StorageContainerLocationProtocol} server implementation.
- */
-@InterfaceAudience.Private
-public final class ScmBlockLocationProtocolServerSideTranslatorPB
-    implements ScmBlockLocationProtocolPB {
-
-  private final ScmBlockLocationProtocol impl;
-
-  /**
-   * Creates a new ScmBlockLocationProtocolServerSideTranslatorPB.
-   *
-   * @param impl {@link ScmBlockLocationProtocol} server implementation
-   */
-  public ScmBlockLocationProtocolServerSideTranslatorPB(
-      ScmBlockLocationProtocol impl) throws IOException {
-    this.impl = impl;
-  }
-
-  @Override
-  public AllocateScmBlockResponseProto allocateScmBlock(
-      RpcController controller, AllocateScmBlockRequestProto request)
-      throws ServiceException {
-    try {
-      AllocatedBlock allocatedBlock =
-          impl.allocateBlock(request.getSize(), request.getType(),
-              request.getFactor(), request.getOwner());
-      if (allocatedBlock != null) {
-        return
-            AllocateScmBlockResponseProto.newBuilder()
-                .setBlockID(allocatedBlock.getBlockID().getProtobuf())
-                .setPipeline(allocatedBlock.getPipeline().getProtobufMessage())
-                .setCreateContainer(allocatedBlock.getCreateContainer())
-                .setErrorCode(AllocateScmBlockResponseProto.Error.success)
-                .build();
-      } else {
-        return AllocateScmBlockResponseProto.newBuilder()
-            .setErrorCode(AllocateScmBlockResponseProto.Error.unknownFailure)
-            .build();
-      }
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public DeleteScmKeyBlocksResponseProto deleteScmKeyBlocks(
-      RpcController controller, DeleteScmKeyBlocksRequestProto req)
-      throws ServiceException {
-    DeleteScmKeyBlocksResponseProto.Builder resp =
-        DeleteScmKeyBlocksResponseProto.newBuilder();
-    try {
-      List<BlockGroup> infoList = req.getKeyBlocksList().stream()
-          .map(BlockGroup::getFromProto).collect(Collectors.toList());
-      final List<DeleteBlockGroupResult> results =
-          impl.deleteKeyBlocks(infoList);
-      for (DeleteBlockGroupResult result: results) {
-        DeleteKeyBlocksResultProto.Builder deleteResult =
-            DeleteKeyBlocksResultProto
-            .newBuilder()
-            .setObjectKey(result.getObjectKey())
-            .addAllBlockResults(result.getBlockResultProtoList());
-        resp.addResults(deleteResult.build());
-      }
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-    return resp.build();
-  }
-
-  @Override
-  public HddsProtos.GetScmInfoRespsonseProto getScmInfo(
-      RpcController controller, HddsProtos.GetScmInfoRequestProto req)
-      throws ServiceException {
-    ScmInfo scmInfo;
-    try {
-      scmInfo = impl.getScmInfo();
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-    return HddsProtos.GetScmInfoRespsonseProto.newBuilder()
-        .setClusterId(scmInfo.getClusterId())
-        .setScmId(scmInfo.getScmId())
-        .build();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
deleted file mode 100644
index d2723f0..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,253 +0,0 @@
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.InChillModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.InChillModeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ForceExitChillModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ForceExitChillModeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.GetContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.GetContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.PipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.PipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMDeleteContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link StorageContainerLocationProtocolPB} to the
- * {@link StorageContainerLocationProtocol} server implementation.
- */
-@InterfaceAudience.Private
-public final class StorageContainerLocationProtocolServerSideTranslatorPB
-    implements StorageContainerLocationProtocolPB {
-
-  private final StorageContainerLocationProtocol impl;
-
-  /**
-   * Creates a new StorageContainerLocationProtocolServerSideTranslatorPB.
-   *
-   * @param impl {@link StorageContainerLocationProtocol} server implementation
-   */
-  public StorageContainerLocationProtocolServerSideTranslatorPB(
-      StorageContainerLocationProtocol impl) throws IOException {
-    this.impl = impl;
-  }
-
-  @Override
-  public ContainerResponseProto allocateContainer(RpcController unused,
-      ContainerRequestProto request) throws ServiceException {
-    try {
-      ContainerWithPipeline containerWithPipeline = impl
-          .allocateContainer(request.getReplicationType(),
-              request.getReplicationFactor(), request.getOwner());
-      return ContainerResponseProto.newBuilder()
-          .setContainerWithPipeline(containerWithPipeline.getProtobuf())
-          .setErrorCode(ContainerResponseProto.Error.success)
-          .build();
-
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public GetContainerResponseProto getContainer(
-      RpcController controller, GetContainerRequestProto request)
-      throws ServiceException {
-    try {
-      ContainerInfo container = impl.getContainer(request.getContainerID());
-      return GetContainerResponseProto.newBuilder()
-          .setContainerInfo(container.getProtobuf())
-          .build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public GetContainerWithPipelineResponseProto getContainerWithPipeline(
-      RpcController controller, GetContainerWithPipelineRequestProto request)
-      throws ServiceException {
-    try {
-      ContainerWithPipeline container = impl
-          .getContainerWithPipeline(request.getContainerID());
-      return GetContainerWithPipelineResponseProto.newBuilder()
-          .setContainerWithPipeline(container.getProtobuf())
-          .build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public SCMListContainerResponseProto listContainer(RpcController controller,
-      SCMListContainerRequestProto request) throws ServiceException {
-    try {
-      long startContainerID = 0;
-      int count = -1;
-
-      // Arguments check.
-      if (request.hasStartContainerID()) {
-        // End container name is given.
-        startContainerID = request.getStartContainerID();
-      }
-      count = request.getCount();
-      List<ContainerInfo> containerList =
-          impl.listContainer(startContainerID, count);
-      SCMListContainerResponseProto.Builder builder =
-          SCMListContainerResponseProto.newBuilder();
-      for (ContainerInfo container : containerList) {
-        builder.addContainers(container.getProtobuf());
-      }
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public SCMDeleteContainerResponseProto deleteContainer(
-      RpcController controller, SCMDeleteContainerRequestProto request)
-      throws ServiceException {
-    try {
-      impl.deleteContainer(request.getContainerID());
-      return SCMDeleteContainerResponseProto.newBuilder().build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public StorageContainerLocationProtocolProtos.NodeQueryResponseProto
-      queryNode(RpcController controller,
-      StorageContainerLocationProtocolProtos.NodeQueryRequestProto request)
-      throws ServiceException {
-    try {
-      HddsProtos.NodeState nodeState = request.getState();
-      List<HddsProtos.Node> datanodes = impl.queryNode(nodeState,
-          request.getScope(), request.getPoolName());
-      return StorageContainerLocationProtocolProtos
-          .NodeQueryResponseProto.newBuilder()
-          .addAllDatanodes(datanodes)
-          .build();
-    } catch (Exception e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public ObjectStageChangeResponseProto notifyObjectStageChange(
-      RpcController controller, ObjectStageChangeRequestProto request)
-      throws ServiceException {
-    try {
-      impl.notifyObjectStageChange(request.getType(), request.getId(),
-          request.getOp(), request.getStage());
-      return ObjectStageChangeResponseProto.newBuilder().build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public PipelineResponseProto allocatePipeline(
-      RpcController controller, PipelineRequestProto request)
-      throws ServiceException {
-    // TODO : Wiring this up requires one more patch.
-    return null;
-  }
-
-  @Override
-  public HddsProtos.GetScmInfoRespsonseProto getScmInfo(
-      RpcController controller, HddsProtos.GetScmInfoRequestProto req)
-      throws ServiceException {
-    try {
-      ScmInfo scmInfo = impl.getScmInfo();
-      return HddsProtos.GetScmInfoRespsonseProto.newBuilder()
-          .setClusterId(scmInfo.getClusterId())
-          .setScmId(scmInfo.getScmId())
-          .build();
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-
-  }
-
-  @Override
-  public InChillModeResponseProto inChillMode(
-      RpcController controller,
-      InChillModeRequestProto request) throws ServiceException {
-    try {
-      return InChillModeResponseProto.newBuilder()
-          .setInChillMode(impl.inChillMode()).build();
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-  }
-
-  @Override
-  public ForceExitChillModeResponseProto forceExitChillMode(
-      RpcController controller, ForceExitChillModeRequestProto request)
-      throws ServiceException {
-    try {
-      return ForceExitChillModeResponseProto.newBuilder()
-          .setExitedChillMode(impl.forceExitChillMode()).build();
-    } catch (IOException ex) {
-      throw new ServiceException(ex);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
deleted file mode 100644
index 860386d..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.protocolPB;
-
-/**
- * This package contains classes for the Protocol Buffers binding of Ozone
- * protocols.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java
deleted file mode 100644
index af56da3..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.utils;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.type.CollectionType;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * JSON Utility functions used in ozone.
- */
-public final class JsonUtils {
-
-  // Reuse ObjectMapper instance for improving performance.
-  // ObjectMapper is thread safe as long as we always configure instance
-  // before use.
-  private static final ObjectMapper MAPPER = new ObjectMapper();
-  private static final ObjectReader READER = MAPPER.readerFor(Object.class);
-  private static final ObjectWriter WRITTER =
-      MAPPER.writerWithDefaultPrettyPrinter();
-
-  private JsonUtils() {
-    // Never constructed
-  }
-
-  public static String toJsonStringWithDefaultPrettyPrinter(String jsonString)
-      throws IOException {
-    Object json = READER.readValue(jsonString);
-    return WRITTER.writeValueAsString(json);
-  }
-
-  public static String toJsonString(Object obj) throws IOException {
-    return MAPPER.writeValueAsString(obj);
-  }
-
-  /**
-   * Deserialize a list of elements from a given string,
-   * each element in the list is in the given type.
-   *
-   * @param str json string.
-   * @param elementType element type.
-   * @return List of elements of type elementType
-   * @throws IOException
-   */
-  public static List<?> toJsonList(String str, Class<?> elementType)
-      throws IOException {
-    CollectionType type = MAPPER.getTypeFactory()
-        .constructCollectionType(List.class, elementType);
-    return MAPPER.readValue(str, type);
-  }
-}
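
For reference, an illustrative sketch of the JsonUtils helpers removed above;
the sample data is made up, but the three static methods are the ones deleted
in this file.

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.ozone.web.utils.JsonUtils;

    public class JsonUtilsExample {
      public static void main(String[] args) throws IOException {
        List<String> keys = Arrays.asList("key1", "key2");
        String json = JsonUtils.toJsonString(keys);        // compact JSON
        String pretty =
            JsonUtils.toJsonStringWithDefaultPrettyPrinter(json);
        List<?> parsed = JsonUtils.toJsonList(json, String.class);
        System.out.println(pretty + " -> " + parsed.size() + " elements");
      }
    }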

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
deleted file mode 100644
index e5812c0..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.utils;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java
deleted file mode 100644
index 5718008..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.utils;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * An abstract class for a background service in ozone.
- * A background service schedules multiple child tasks in parallel
- * in a certain period. In each interval, it waits until all the tasks
- * finish execution and then schedule next interval.
- */
-public abstract class BackgroundService {
-
-  @VisibleForTesting
-  public static final Logger LOG =
-      LoggerFactory.getLogger(BackgroundService.class);
-
-  // Executor to launch child tasks
-  private final ScheduledExecutorService exec;
-  private final ThreadGroup threadGroup;
-  private final ThreadFactory threadFactory;
-  private final String serviceName;
-  private final long interval;
-  private final long serviceTimeout;
-  private final TimeUnit unit;
-  private final PeriodicalTask service;
-
-  public BackgroundService(String serviceName, long interval,
-      TimeUnit unit, int threadPoolSize, long serviceTimeout) {
-    this.interval = interval;
-    this.unit = unit;
-    this.serviceName = serviceName;
-    this.serviceTimeout = serviceTimeout;
-    threadGroup = new ThreadGroup(serviceName);
-    ThreadFactory tf = r -> new Thread(threadGroup, r);
-    threadFactory = new ThreadFactoryBuilder()
-        .setThreadFactory(tf)
-        .setDaemon(true)
-        .setNameFormat(serviceName + "#%d")
-        .build();
-    exec = Executors.newScheduledThreadPool(threadPoolSize, threadFactory);
-    service = new PeriodicalTask();
-  }
-
-  protected ExecutorService getExecutorService() {
-    return this.exec;
-  }
-
-  @VisibleForTesting
-  public int getThreadCount() {
-    return threadGroup.activeCount();
-  }
-
-  @VisibleForTesting
-  public void triggerBackgroundTaskForTesting() {
-    service.run();
-  }
-
-  // start service
-  public void start() {
-    exec.scheduleWithFixedDelay(service, 0, interval, unit);
-  }
-
-  public abstract BackgroundTaskQueue getTasks();
-
-  /**
-   * Run one or more background tasks concurrently.
-   * Wait until all tasks to return the result.
-   */
-  public class PeriodicalTask implements Runnable {
-    @Override
-    public synchronized void run() {
-      LOG.debug("Running background service : {}", serviceName);
-      BackgroundTaskQueue tasks = getTasks();
-      if (tasks.isEmpty()) {
-        // No task found, or some problems to init tasks
-        // return and retry in next interval.
-        return;
-      }
-
-      LOG.debug("Number of background tasks to execute : {}", tasks.size());
-      CompletionService<BackgroundTaskResult> taskCompletionService =
-          new ExecutorCompletionService<>(exec);
-
-      List<Future<BackgroundTaskResult>> results = Lists.newArrayList();
-      while (tasks.size() > 0) {
-        BackgroundTask task = tasks.poll();
-        Future<BackgroundTaskResult> result =
-            taskCompletionService.submit(task);
-        results.add(result);
-      }
-
-      results.parallelStream().forEach(taskResultFuture -> {
-        try {
-          // Collect task results
-          BackgroundTaskResult result = serviceTimeout > 0
-              ? taskResultFuture.get(serviceTimeout, unit)
-              : taskResultFuture.get();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("task execution result size {}", result.getSize());
-          }
-        } catch (InterruptedException | ExecutionException e) {
-          LOG.warn(
-              "Background task fails to execute, "
-                  + "retrying in next interval", e);
-        } catch (TimeoutException e) {
-          LOG.warn("Background task executes timed out, "
-              + "retrying in next interval", e);
-        }
-      });
-    }
-  }
-
-  // shutdown and make sure all threads are properly released.
-  public void shutdown() {
-    LOG.info("Shutting down service {}", this.serviceName);
-    exec.shutdown();
-    try {
-      if (!exec.awaitTermination(60, TimeUnit.SECONDS)) {
-        exec.shutdownNow();
-      }
-    } catch (InterruptedException e) {
-      exec.shutdownNow();
-    }
-    if (threadGroup.activeCount() == 0 && !threadGroup.isDestroyed()) {
-      threadGroup.destroy();
-    }
-  }
-}
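
For reference, a minimal subclass sketch of the BackgroundService removed
above. The service name, interval, and the cleanup work are placeholders; the
constructor signature, getTasks(), and the BackgroundTask/BackgroundTaskQueue
types match the files deleted later in this commit.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.utils.BackgroundService;
    import org.apache.hadoop.utils.BackgroundTask;
    import org.apache.hadoop.utils.BackgroundTaskQueue;
    import org.apache.hadoop.utils.BackgroundTaskResult;
    import org.apache.hadoop.utils.BackgroundTaskResult.EmptyTaskResult;

    public class ExampleCleanupService extends BackgroundService {

      public ExampleCleanupService() {
        // name, interval, unit, thread pool size, per-task timeout (same unit)
        super("ExampleCleanupService", 60, TimeUnit.SECONDS, 2, 300);
      }

      @Override
      public BackgroundTaskQueue getTasks() {
        BackgroundTaskQueue queue = new BackgroundTaskQueue();
        queue.add(new BackgroundTask<BackgroundTaskResult>() {
          @Override
          public int getPriority() {
            return 0;                        // lower value runs first
          }

          @Override
          public BackgroundTaskResult call() throws Exception {
            // one unit of periodic work goes here
            return EmptyTaskResult.newResult();
          }
        });
        return queue;
      }
    }

Calling start() schedules getTasks() every interval, and shutdown() stops the
executor, as in the deleted implementation.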

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTask.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTask.java
deleted file mode 100644
index 47e8ebc..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTask.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.utils;
-
-import java.util.concurrent.Callable;
-
-/**
- * A task thread to run by {@link BackgroundService}.
- */
-public interface BackgroundTask<T> extends Callable<T> {
-
-  int getPriority();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java
deleted file mode 100644
index b56ef0c..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.utils;
-
-import java.util.PriorityQueue;
-
-/**
- * A priority queue that stores a number of {@link BackgroundTask}.
- */
-public class BackgroundTaskQueue {
-
-  private final PriorityQueue<BackgroundTask> tasks;
-
-  public BackgroundTaskQueue() {
-    tasks = new PriorityQueue<>((task1, task2)
-        -> task1.getPriority() - task2.getPriority());
-  }
-
-  /**
-   * @return the head task in this queue.
-   */
-  public synchronized BackgroundTask poll() {
-    return tasks.poll();
-  }
-
-  /**
-   * Add a {@link BackgroundTask} to the queue,
-   * the task will be sorted by its priority.
-   *
-   * @param task
-   */
-  public synchronized void add(BackgroundTask task) {
-    tasks.add(task);
-  }
-
-  /**
-   * @return true if the queue contains no task, false otherwise.
-   */
-  public synchronized boolean isEmpty() {
-    return tasks.isEmpty();
-  }
-
-  /**
-   * @return the size of the queue.
-   */
-  public synchronized int size() {
-    return tasks.size();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java
deleted file mode 100644
index 198300f..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.utils;
-
-/**
- * Result of a {@link BackgroundTask}.
- */
-public interface BackgroundTaskResult {
-
-  /**
-   * Returns the size of entries included in this result.
-   */
-  int getSize();
-
-  /**
-   * An empty task result implementation.
-   */
-  class EmptyTaskResult implements BackgroundTaskResult {
-
-    public static EmptyTaskResult newResult() {
-      return new EmptyTaskResult();
-    }
-
-    @Override
-    public int getSize() {
-      return 0;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BatchOperation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BatchOperation.java
deleted file mode 100644
index 47699eb..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BatchOperation.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import com.google.common.collect.Lists;
-
-import java.util.List;
-
-/**
- * An utility class to store a batch of DB write operations.
- */
-public class BatchOperation {
-
-  /**
-   * Enum for write operations.
-   */
-  public enum Operation {
-    DELETE, PUT
-  }
-
-  private List<SingleOperation> operations =
-      Lists.newArrayList();
-
-  /**
-   * Add a PUT operation into the batch.
-   */
-  public void put(byte[] key, byte[] value) {
-    operations.add(new SingleOperation(Operation.PUT, key, value));
-  }
-
-  /**
-   * Add a DELETE operation into the batch.
-   */
-  public void delete(byte[] key) {
-    operations.add(new SingleOperation(Operation.DELETE, key, null));
-
-  }
-
-  public List<SingleOperation> getOperations() {
-    return operations;
-  }
-
-  /**
-   * A SingleOperation represents a PUT or DELETE operation
-   * and the data the operation needs to manipulates.
-   */
-  public static class SingleOperation {
-
-    private Operation opt;
-    private byte[] key;
-    private byte[] value;
-
-    public SingleOperation(Operation opt, byte[] key, byte[] value) {
-      this.opt = opt;
-      if (key == null) {
-        throw new IllegalArgumentException("key cannot be null");
-      }
-      this.key = key.clone();
-      this.value = value == null ? null : value.clone();
-    }
-
-    public Operation getOpt() {
-      return opt;
-    }
-
-    public byte[] getKey() {
-      return key.clone();
-    }
-
-    public byte[] getValue() {
-      return value == null ? null : value.clone();
-    }
-  }
-}
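
For reference, an illustrative sketch of the BatchOperation class removed
above; the keys and values are placeholders, and only the methods deleted in
this file are used.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.utils.BatchOperation;

    public class BatchOperationExample {
      public static void main(String[] args) {
        BatchOperation batch = new BatchOperation();
        batch.put("key1".getBytes(StandardCharsets.UTF_8),
            "value1".getBytes(StandardCharsets.UTF_8));
        batch.delete("staleKey".getBytes(StandardCharsets.UTF_8));
        // A MetadataStore implementation would walk getOperations() and map
        // each PUT/DELETE onto the backing store's own batch API.
        batch.getOperations().forEach(op ->
            System.out.println(op.getOpt() + " "
                + new String(op.getKey(), StandardCharsets.UTF_8)));
      }
    }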

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/EntryConsumer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/EntryConsumer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/EntryConsumer.java
deleted file mode 100644
index c407398..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/EntryConsumer.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import java.io.IOException;
-
-/**
- * A consumer for metadata store key-value entries.
- * Used by {@link MetadataStore} class.
- */
-@FunctionalInterface
-public interface EntryConsumer {
-
-  /**
-   * Consumes a key and value and produces a boolean result.
-   * @param key key
-   * @param value value
-   * @return a boolean value produced by the consumer
-   * @throws IOException
-   */
-  boolean consume(byte[] key, byte[] value) throws IOException;
-}
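
For reference, because EntryConsumer is a functional interface, a store scan
callback can be written as a lambda. The sketch below only exercises the
lambda directly; the entry bytes are placeholders.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.utils.EntryConsumer;

    public class EntryConsumerExample {
      public static void main(String[] args) throws Exception {
        // Return true to keep iterating, false to stop the scan early.
        EntryConsumer printer = (key, value) -> {
          System.out.println(new String(key, StandardCharsets.UTF_8) + " = "
              + new String(value, StandardCharsets.UTF_8));
          return true;
        };
        printer.consume("k".getBytes(StandardCharsets.UTF_8),
            "v".getBytes(StandardCharsets.UTF_8));
      }
    }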

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
deleted file mode 100644
index e7f697a..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/HddsVersionInfo.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.ClassUtil;
-import org.apache.hadoop.util.ThreadUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Properties;
-
-/**
- * This class returns build information about Hadoop components.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class HddsVersionInfo {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      HddsVersionInfo.class);
-
-  private Properties info;
-
-  protected HddsVersionInfo(String component) {
-    info = new Properties();
-    String versionInfoFile = component + "-version-info.properties";
-    InputStream is = null;
-    try {
-      is = ThreadUtil.getResourceAsStream(HddsVersionInfo.class.getClassLoader(),
-          versionInfoFile);
-      info.load(is);
-    } catch (IOException ex) {
-      LoggerFactory.getLogger(getClass()).warn("Could not read '" +
-          versionInfoFile + "', " + ex.toString(), ex);
-    } finally {
-      IOUtils.closeStream(is);
-    }
-  }
-
-  protected String _getVersion() {
-    return info.getProperty("version", "Unknown");
-  }
-
-  protected String _getRevision() {
-    return info.getProperty("revision", "Unknown");
-  }
-
-  protected String _getBranch() {
-    return info.getProperty("branch", "Unknown");
-  }
-
-  protected String _getDate() {
-    return info.getProperty("date", "Unknown");
-  }
-
-  protected String _getUser() {
-    return info.getProperty("user", "Unknown");
-  }
-
-  protected String _getUrl() {
-    return info.getProperty("url", "Unknown");
-  }
-
-  protected String _getSrcChecksum() {
-    return info.getProperty("srcChecksum", "Unknown");
-  }
-
-  protected String _getBuildVersion(){
-    return _getVersion() +
-      " from " + _getRevision() +
-      " by " + _getUser() +
-      " source checksum " + _getSrcChecksum();
-  }
-
-  protected String _getProtocVersion() {
-    return info.getProperty("protocVersion", "Unknown");
-  }
-
-  private static final HddsVersionInfo HDDS_VERSION_INFO =
-      new HddsVersionInfo("hdds");
-  /**
-   * Get the HDDS version.
-   * @return the Hdds version string, eg. "0.6.3-dev"
-   */
-  public static String getVersion() {
-    return HDDS_VERSION_INFO._getVersion();
-  }
-
-  /**
-   * Get the Git commit hash of the repository when compiled.
-   * @return the commit hash, eg. "18f64065d5db6208daf50b02c1b5ed4ee3ce547a"
-   */
-  public static String getRevision() {
-    return HDDS_VERSION_INFO._getRevision();
-  }
-
-  /**
-   * Get the branch on which this originated.
-   * @return The branch name, e.g. "trunk" or "branches/branch-0.20"
-   */
-  public static String getBranch() {
-    return HDDS_VERSION_INFO._getBranch();
-  }
-
-  /**
-   * The date that HDDS was compiled.
-   * @return the compilation date in unix date format
-   */
-  public static String getDate() {
-    return HDDS_VERSION_INFO._getDate();
-  }
-
-  /**
-   * The user that compiled HDDS.
-   * @return the username of the user
-   */
-  public static String getUser() {
-    return HDDS_VERSION_INFO._getUser();
-  }
-
-  /**
-   * Get the URL for the HDDS repository.
-   * @return the URL of the Hdds repository
-   */
-  public static String getUrl() {
-    return HDDS_VERSION_INFO._getUrl();
-  }
-
-  /**
-   * Get the checksum of the source files from which HDDS was built.
-   * @return the checksum of the source files
-   */
-  public static String getSrcChecksum() {
-    return HDDS_VERSION_INFO._getSrcChecksum();
-  }
-
-  /**
-   * Returns the buildVersion which includes version,
-   * revision, user and date.
-   * @return the buildVersion
-   */
-  public static String getBuildVersion(){
-    return HDDS_VERSION_INFO._getBuildVersion();
-  }
-
-  /**
-   * Returns the protoc version used for the build.
-   * @return the protoc version
-   */
-  public static String getProtocVersion(){
-    return HDDS_VERSION_INFO._getProtocVersion();
-  }
-
-  public static void main(String[] args) {
-    System.out.println("Using HDDS " + getVersion());
-    System.out.println("Source code repository " + getUrl() + " -r " +
-        getRevision());
-    System.out.println("Compiled by " + getUser() + " on " + getDate());
-    System.out.println("Compiled with protoc " + getProtocVersion());
-    System.out.println("From source with checksum " + getSrcChecksum());
-    LOG.debug("This command was run using " +
-        ClassUtil.findContainingJar(HddsVersionInfo.class));
-  }
-}
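
For reference, an illustrative call into the static accessors of the
HddsVersionInfo class removed above (the same information its own main()
prints).

    import org.apache.hadoop.utils.HddsVersionInfo;

    public class VersionExample {
      public static void main(String[] args) {
        System.out.println("HDDS " + HddsVersionInfo.getVersion()
            + " (revision " + HddsVersionInfo.getRevision() + ")"
            + " compiled by " + HddsVersionInfo.getUser()
            + " with protoc " + HddsVersionInfo.getProtocVersion());
      }
    }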

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java
deleted file mode 100644
index ed116a3..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java
+++ /dev/null
@@ -1,387 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
-import org.fusesource.leveldbjni.JniDBFactory;
-import org.iq80.leveldb.DB;
-import org.iq80.leveldb.DBIterator;
-import org.iq80.leveldb.Options;
-import org.iq80.leveldb.ReadOptions;
-import org.iq80.leveldb.Snapshot;
-import org.iq80.leveldb.WriteBatch;
-import org.iq80.leveldb.WriteOptions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-/**
- * LevelDB interface.
- */
-public class LevelDBStore implements MetadataStore {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(LevelDBStore.class);
-
-  private DB db;
-  private final File dbFile;
-  private final Options dbOptions;
-  private final WriteOptions writeOptions;
-
-  public LevelDBStore(File dbPath, boolean createIfMissing)
-      throws IOException {
-    dbOptions = new Options();
-    dbOptions.createIfMissing(createIfMissing);
-    this.dbFile = dbPath;
-    this.writeOptions = new WriteOptions().sync(true);
-    openDB(dbPath, dbOptions);
-  }
-
-  /**
-   * Opens a DB file with the given options.
-   *
-   * @param dbPath  - DB File path
-   * @param options - DB Options
-   * @throws IOException
-   */
-  public LevelDBStore(File dbPath, Options options)
-      throws IOException {
-    dbOptions = options;
-    this.dbFile = dbPath;
-    this.writeOptions = new WriteOptions().sync(true);
-    openDB(dbPath, dbOptions);
-  }
-
-  private void openDB(File dbPath, Options options) throws IOException {
-    if (dbPath.getParentFile().mkdirs()) {
-      LOG.debug("Db path {} created.", dbPath.getParentFile());
-    }
-    db = JniDBFactory.factory.open(dbPath, options);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("LevelDB successfully opened");
-      LOG.debug("[Option] cacheSize = " + options.cacheSize());
-      LOG.debug("[Option] createIfMissing = " + options.createIfMissing());
-      LOG.debug("[Option] blockSize = " + options.blockSize());
-      LOG.debug("[Option] compressionType= " + options.compressionType());
-      LOG.debug("[Option] maxOpenFiles= " + options.maxOpenFiles());
-      LOG.debug("[Option] writeBufferSize= "+ options.writeBufferSize());
-    }
-  }
-
-  /**
-   * Puts a key-value pair into the DB.
-   *
-   * @param key   - key
-   * @param value - value
-   */
-  @Override
-  public void put(byte[] key, byte[] value) {
-    db.put(key, value, writeOptions);
-  }
-
-  /**
-   * Gets the value mapped to the given key.
-   *
-   * @param key key
-   * @return value
-   */
-  @Override
-  public byte[] get(byte[] key) {
-    return db.get(key);
-  }
-
-  /**
-   * Delete Key.
-   *
-   * @param key - Key
-   */
-  @Override
-  public void delete(byte[] key) {
-    db.delete(key);
-  }
-
-  /**
-   * Closes the DB.
-   *
-   * @throws IOException
-   */
-  @Override
-  public void close() throws IOException {
-    if (db != null){
-      db.close();
-    }
-  }
-
-  /**
-   * Returns true if the DB is empty.
-   *
-   * @return boolean
-   * @throws IOException
-   */
-  @Override
-  public boolean isEmpty() throws IOException {
-    try (DBIterator iter = db.iterator()) {
-      iter.seekToFirst();
-      return !iter.hasNext();
-    }
-  }
-
-  /**
-   * Returns the actual levelDB object.
-   * @return DB handle.
-   */
-  public DB getDB() {
-    return db;
-  }
-
-  /**
-   * Returns an iterator on all the key-value pairs in the DB.
-   * @return an iterator on DB entries.
-   */
-  public DBIterator getIterator() {
-    return db.iterator();
-  }
-
-
-  @Override
-  public void destroy() throws IOException {
-    close();
-    JniDBFactory.factory.destroy(dbFile, dbOptions);
-  }
-
-  @Override
-  public ImmutablePair<byte[], byte[]> peekAround(int offset,
-      byte[] from) throws IOException, IllegalArgumentException {
-    try (DBIterator it = db.iterator()) {
-      if (from == null) {
-        it.seekToFirst();
-      } else {
-        it.seek(from);
-      }
-      if (!it.hasNext()) {
-        return null;
-      }
-      switch (offset) {
-      case 0:
-        Entry<byte[], byte[]> current = it.next();
-        return new ImmutablePair<>(current.getKey(), current.getValue());
-      case 1:
-        if (it.next() != null && it.hasNext()) {
-          Entry<byte[], byte[]> next = it.peekNext();
-          return new ImmutablePair<>(next.getKey(), next.getValue());
-        }
-        break;
-      case -1:
-        if (it.hasPrev()) {
-          Entry<byte[], byte[]> prev = it.peekPrev();
-          return new ImmutablePair<>(prev.getKey(), prev.getValue());
-        }
-        break;
-      default:
-        throw new IllegalArgumentException(
-            "Position can only be -1, 0 " + "or 1, but found " + offset);
-      }
-    }
-    return null;
-  }
-
-  @Override
-  public void iterate(byte[] from, EntryConsumer consumer)
-      throws IOException {
-    try (DBIterator iter = db.iterator()) {
-      if (from != null) {
-        iter.seek(from);
-      } else {
-        iter.seekToFirst();
-      }
-      while (iter.hasNext()) {
-        Entry<byte[], byte[]> current = iter.next();
-        if (!consumer.consume(current.getKey(),
-            current.getValue())) {
-          break;
-        }
-      }
-    }
-  }
-
-  /**
-   * Compacts the DB by removing deleted keys etc.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public void compactDB() throws IOException {
-    if(db != null) {
-      // From LevelDB docs : begin == null and end == null means the whole DB.
-      db.compactRange(null, null);
-    }
-  }
-
-  @Override
-  public void writeBatch(BatchOperation operation) throws IOException {
-    List<BatchOperation.SingleOperation> operations =
-        operation.getOperations();
-    if (!operations.isEmpty()) {
-      try (WriteBatch writeBatch = db.createWriteBatch()) {
-        for (BatchOperation.SingleOperation opt : operations) {
-          switch (opt.getOpt()) {
-          case DELETE:
-            writeBatch.delete(opt.getKey());
-            break;
-          case PUT:
-            writeBatch.put(opt.getKey(), opt.getValue());
-            break;
-          default:
-            throw new IllegalArgumentException("Invalid operation "
-                + opt.getOpt());
-          }
-        }
-        db.write(writeBatch);
-      }
-    }
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, false, filters);
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, true, filters);
-  }
-
-  /**
-   * Returns a certain range of key value pairs as a list based on a
-   * startKey or count. Further a {@link MetadataKeyFilter} can be added to
-   * filter keys if necessary. To prevent race conditions while listing
-   * entries, this implementation takes a snapshot and lists the entries from
-   * the snapshot. This may, on the other hand, cause the returned range to
-   * differ slightly from the actual data if the data is updated concurrently.
-   * <p>
-   * If the startKey is specified and found in levelDB, this key and the keys
-   * after this key will be included in the result. If the startKey is null
-   * all entries will be included as long as other conditions are satisfied.
-   * If the given startKey doesn't exist, an empty list will be returned.
-   * <p>
-   * The count argument is to limit number of total entries to return,
-   * the value for count must be an integer greater than 0.
-   * <p>
-   * This method allows the caller to specify one or more {@link MetadataKeyFilter}
-   * instances to filter keys by certain conditions. Once given, only the entries
-   * whose key passes all the filters will be included in the result.
-   *
-   * @param startKey a start key.
-   * @param count max number of entries to return.
-   * @param filters customized one or more {@link MetadataKeyFilter}.
-   * @return a list of entries found in the database or an empty list if the
-   * startKey is invalid.
-   * @throws IOException if there are I/O errors.
-   * @throws IllegalArgumentException if count is less than 0.
-   */
-  private List<Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, boolean sequential, MetadataKeyFilter... filters)
-      throws IOException {
-    List<Entry<byte[], byte[]>> result = new ArrayList<>();
-    long start = System.currentTimeMillis();
-    if (count < 0) {
-      throw new IllegalArgumentException(
-          "Invalid count given " + count + ", count must be greater than 0");
-    }
-    Snapshot snapShot = null;
-    DBIterator dbIter = null;
-    try {
-      snapShot = db.getSnapshot();
-      ReadOptions readOptions = new ReadOptions().snapshot(snapShot);
-      dbIter = db.iterator(readOptions);
-      if (startKey == null) {
-        dbIter.seekToFirst();
-      } else {
-        if (db.get(startKey) == null) {
-          // Key not found, return empty list
-          return result;
-        }
-        dbIter.seek(startKey);
-      }
-      while (dbIter.hasNext() && result.size() < count) {
-        byte[] preKey = dbIter.hasPrev() ? dbIter.peekPrev().getKey() : null;
-        byte[] nextKey = dbIter.hasNext() ? dbIter.peekNext().getKey() : null;
-        Entry<byte[], byte[]> current = dbIter.next();
-
-        if (filters == null) {
-          result.add(current);
-        } else {
-          if (Arrays.stream(filters).allMatch(
-              entry -> entry.filterKey(preKey, current.getKey(), nextKey))) {
-            result.add(current);
-          } else {
-            if (result.size() > 0 && sequential) {
-              // If the caller asks for a sequential range of results
-              // and we meet a mismatch, abort the iteration here.
-              // If the result is still empty, keep looking for the first match.
-              break;
-            }
-          }
-        }
-      }
-    } finally {
-      if (snapShot != null) {
-        snapShot.close();
-      }
-      if (dbIter != null) {
-        dbIter.close();
-      }
-      if (LOG.isDebugEnabled()) {
-        if (filters != null) {
-          for (MetadataKeyFilters.MetadataKeyFilter filter : filters) {
-            int scanned = filter.getKeysScannedNum();
-            int hinted = filter.getKeysHintedNum();
-            if (scanned > 0 || hinted > 0) {
-              LOG.debug(
-                  "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}",
-                  filter.getClass().getSimpleName(), scanned, hinted);
-            }
-          }
-        }
-        long end = System.currentTimeMillis();
-        long timeConsumed = end - start;
-        LOG.debug("Time consumed for getRangeKVs() is {}ms,"
-            + " result length is {}.", timeConsumed, result.size());
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public MetaStoreIterator<KeyValue> iterator() {
-    return new LevelDBStoreIterator(db.iterator());
-  }
-}
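As a point of reference, a minimal sketch of driving the removed LevelDBStore through its public API; the database path and key strings below are illustrative only.

import java.io.File;

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.utils.LevelDBStore;
import org.apache.hadoop.utils.MetadataStore;

// Illustrative only: the path and keys are made up for this sketch.
public final class LevelDBStoreExample {
  public static void main(String[] args) throws Exception {
    MetadataStore store = new LevelDBStore(new File("/tmp/example-db"), true);
    try {
      byte[] key = DFSUtil.string2Bytes("container1");
      store.put(key, DFSUtil.string2Bytes("some-metadata"));  // synchronous write (WriteOptions.sync(true))
      byte[] value = store.get(key);                          // null when the key is absent
      System.out.println(value == null ? "missing" : "found");
    } finally {
      store.close();                                          // releases the JNI DB handle
    }
  }
}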

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java
deleted file mode 100644
index 7b62f7a..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import org.iq80.leveldb.DBIterator;
-import java.util.Map;
-import java.util.NoSuchElementException;
-
-import org.apache.hadoop.utils.MetadataStore.KeyValue;
-
-
-/**
- * LevelDB store iterator.
- */
-public class LevelDBStoreIterator implements MetaStoreIterator<KeyValue> {
-
-
-  private DBIterator levelDBIterator;
-
-  public LevelDBStoreIterator(DBIterator iterator) {
-    this.levelDBIterator = iterator;
-    levelDBIterator.seekToFirst();
-  }
-
-  @Override
-  public boolean hasNext() {
-    return levelDBIterator.hasNext();
-  }
-
-  @Override
-  public KeyValue next() {
-    if(levelDBIterator.hasNext()) {
-      Map.Entry<byte[], byte[]> entry = levelDBIterator.next();
-      return KeyValue.create(entry.getKey(), entry.getValue());
-    }
-    throw new NoSuchElementException("LevelDB Store has no more elements");
-  }
-
-  @Override
-  public void seekToFirst() {
-    levelDBIterator.seekToFirst();
-  }
-
-  @Override
-  public void seekToLast() {
-    levelDBIterator.seekToLast();
-  }
-}
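A short sketch of consuming this iterator through the MetaStoreIterator contract defined in the next file; the helper class name is illustrative and the store is assumed to be already open.

import org.apache.hadoop.utils.MetaStoreIterator;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.MetadataStore.KeyValue;

// Illustrative helper; 'store' is assumed to be an open MetadataStore.
public final class DumpEntries {
  static void dump(MetadataStore store) {
    MetaStoreIterator<KeyValue> it = store.iterator();  // positioned at the first entry
    while (it.hasNext()) {
      KeyValue kv = it.next();  // getKey()/getValue() return defensive copies
      System.out.println(kv.getKey().length + " / " + kv.getValue().length);
    }
  }
}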

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
deleted file mode 100644
index 52d0a3e..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import java.util.Iterator;
-
-/**
- * Iterator for MetadataStore DB.
- * @param <T> the type of entries returned by the iterator.
- */
-public interface MetaStoreIterator<T> extends Iterator<T> {
-
-  /**
-   * seek to first entry.
-   */
-  void seekToFirst();
-
-  /**
-   * seek to last entry.
-   */
-  void seekToLast();
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
deleted file mode 100644
index a3430f8..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.utils;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * A utility class to filter levelDB keys.
- */
-public final class MetadataKeyFilters {
-
-  private static KeyPrefixFilter deletingKeyFilter =
-      new MetadataKeyFilters.KeyPrefixFilter()
-          .addFilter(OzoneConsts.DELETING_KEY_PREFIX);
-
-  private static KeyPrefixFilter deletedKeyFilter =
-      new MetadataKeyFilters.KeyPrefixFilter()
-          .addFilter(OzoneConsts.DELETED_KEY_PREFIX);
-
-  private static KeyPrefixFilter normalKeyFilter =
-      new MetadataKeyFilters.KeyPrefixFilter()
-          .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true)
-          .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true)
-          .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true);
-
-  private MetadataKeyFilters() {
-  }
-
-  public static KeyPrefixFilter getDeletingKeyFilter() {
-    return deletingKeyFilter;
-  }
-
-  public static KeyPrefixFilter getDeletedKeyFilter() {
-    return deletedKeyFilter;
-  }
-
-  public static KeyPrefixFilter getNormalKeyFilter() {
-    return normalKeyFilter;
-  }
-  /**
-   * Interface for levelDB key filters.
-   */
-  public interface MetadataKeyFilter {
-    /**
-     * Filter levelDB key with a certain condition.
-     *
-     * @param preKey     previous key.
-     * @param currentKey current key.
-     * @param nextKey    next key.
-     * @return true if a certain condition satisfied, return false otherwise.
-     */
-    boolean filterKey(byte[] preKey, byte[] currentKey, byte[] nextKey);
-
-    default int getKeysScannedNum() {
-      return 0;
-    }
-
-    default int getKeysHintedNum() {
-      return 0;
-    }
-  }
-
-  /**
-   * Utility class to filter keys by a string prefix. This filter
-   * assumes keys can be parsed as strings.
-   */
-  public static class KeyPrefixFilter implements MetadataKeyFilter {
-
-    private List<String> positivePrefixList = new ArrayList<>();
-    private List<String> negativePrefixList = new ArrayList<>();
-    private boolean atleastOnePositiveMatch;
-    private int keysScanned = 0;
-    private int keysHinted = 0;
-
-    public KeyPrefixFilter() {}
-
-    /**
-     * KeyPrefixFilter constructor. The filter is made of a positive and a
-     * negative prefix list: positivePrefixList holds the prefixes that are
-     * accepted, whereas negativePrefixList holds the prefixes that are
-     * rejected.
-     *
-     * @param atleastOnePositiveMatch if true, a key must be accepted by at
-     *                                least one positive filter.
-     */
-    public KeyPrefixFilter(boolean atleastOnePositiveMatch) {
-      this.atleastOnePositiveMatch = atleastOnePositiveMatch;
-    }
-
-    public KeyPrefixFilter addFilter(String keyPrefix) {
-      addFilter(keyPrefix, false);
-      return this;
-    }
-
-    public KeyPrefixFilter addFilter(String keyPrefix, boolean negative) {
-      Preconditions.checkArgument(!Strings.isNullOrEmpty(keyPrefix),
-          "KeyPrefix is null or empty: " + keyPrefix);
-      // The keyPrefix being added should not be a prefix of any opposing
-      // filter already present. If keyPrefix is a negative filter it should
-      // not be a prefix of any positive filter, nor should any opposing
-      // filter be a prefix of keyPrefix.
-      // For example, if b0 is accepted, b cannot be rejected, and
-      // if b is accepted, b0 cannot be rejected. If these scenarios need to
-      // be handled we need to add priorities.
-      if (negative) {
-        Preconditions.checkArgument(positivePrefixList.stream().noneMatch(
-            prefix -> prefix.startsWith(keyPrefix) || keyPrefix
-                .startsWith(prefix)),
-            "KeyPrefix: " + keyPrefix + " already accepted.");
-        this.negativePrefixList.add(keyPrefix);
-      } else {
-        Preconditions.checkArgument(negativePrefixList.stream().noneMatch(
-            prefix -> prefix.startsWith(keyPrefix) || keyPrefix
-                .startsWith(prefix)),
-            "KeyPrefix: " + keyPrefix + " already rejected.");
-        this.positivePrefixList.add(keyPrefix);
-      }
-      return this;
-    }
-
-    @Override
-    public boolean filterKey(byte[] preKey, byte[] currentKey,
-        byte[] nextKey) {
-      keysScanned++;
-      if (currentKey == null) {
-        return false;
-      }
-      boolean accept;
-
-      // There are no filters present
-      if (positivePrefixList.isEmpty() && negativePrefixList.isEmpty()) {
-        return true;
-      }
-
-      accept = !positivePrefixList.isEmpty() && positivePrefixList.stream()
-          .anyMatch(prefix -> {
-            byte[] prefixBytes = DFSUtil.string2Bytes(prefix);
-            return prefixMatch(prefixBytes, currentKey);
-          });
-      if (accept) {
-        keysHinted++;
-        return true;
-      } else if (atleastOnePositiveMatch) {
-        return false;
-      }
-
-      accept = !negativePrefixList.isEmpty() && negativePrefixList.stream()
-          .allMatch(prefix -> {
-            byte[] prefixBytes = DFSUtil.string2Bytes(prefix);
-            return !prefixMatch(prefixBytes, currentKey);
-          });
-      if (accept) {
-        keysHinted++;
-        return true;
-      }
-
-      return false;
-    }
-
-    @Override
-    public int getKeysScannedNum() {
-      return keysScanned;
-    }
-
-    @Override
-    public int getKeysHintedNum() {
-      return keysHinted;
-    }
-
-    private static boolean prefixMatch(byte[] prefix, byte[] key) {
-      Preconditions.checkNotNull(prefix);
-      Preconditions.checkNotNull(key);
-      if (key.length < prefix.length) {
-        return false;
-      }
-      for (int i = 0; i < prefix.length; i++) {
-        if (key[i] != prefix[i]) {
-          return false;
-        }
-      }
-      return true;
-    }
-  }
-}
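A sketch of composing the removed KeyPrefixFilter; the prefix strings below are illustrative and do not correspond to the OzoneConsts values used above.

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;

// Illustrative only: the prefixes are made up for this sketch.
public final class FilterExample {
  public static void main(String[] args) {
    KeyPrefixFilter filter = new KeyPrefixFilter()
        .addFilter("#open#")            // accept keys starting with this prefix
        .addFilter("#deleted#", true);  // reject keys starting with this prefix
    byte[] key = DFSUtil.string2Bytes("#open#container1");
    // preKey and nextKey may be null; only currentKey matters for prefix matching.
    boolean accepted = filter.filterKey(null, key, null);
    System.out.println(accepted);       // true: the positive prefix matches
  }
}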

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStore.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStore.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStore.java
deleted file mode 100644
index 7d3bc6b..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStore.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Interface for key-value store that stores ozone metadata.
- * Ozone metadata is stored as key-value pairs; both key and value
- * are arbitrary byte arrays.
- */
-@InterfaceStability.Evolving
-public interface MetadataStore extends Closeable {
-
-  /**
-   * Puts a key-value pair into the store.
-   *
-   * @param key metadata key
-   * @param value metadata value
-   */
-  void put(byte[] key, byte[] value) throws IOException;
-
-  /**
-   * @return true if the metadata store is empty.
-   *
-   * @throws IOException
-   */
-  boolean isEmpty() throws IOException;
-
-  /**
-   * Returns the value mapped to the given key in byte array.
-   *
-   * @param key metadata key
-   * @return value in byte array
-   * @throws IOException
-   */
-  byte[] get(byte[] key) throws IOException;
-
-  /**
-   * Deletes a key from the metadata store.
-   *
-   * @param key metadata key
-   * @throws IOException
-   */
-  void delete(byte[] key) throws IOException;
-
-  /**
-   * Returns a certain range of key value pairs as a list based on a
-   * startKey or count. Further a {@link MetadataKeyFilter} can be added to
-   * filter keys if necessary. To prevent race conditions while listing
-   * entries, this implementation takes a snapshot and lists the entries from
-   * the snapshot. This may, on the other hand, cause the returned range to
-   * differ slightly from the actual data if the data is updated concurrently.
-   * <p>
-   * If the startKey is specified and found in levelDB, this key and the keys
-   * after this key will be included in the result. If the startKey is null
-   * all entries will be included as long as other conditions are satisfied.
-   * If the given startKey doesn't exist, an empty list will be returned.
-   * <p>
-   * The count argument is to limit number of total entries to return,
-   * the value for count must be an integer greater than 0.
-   * <p>
-   * This method allows the caller to specify one or more {@link MetadataKeyFilter}
-   * instances to filter keys by certain conditions. Once given, only the entries
-   * whose key passes all the filters will be included in the result.
-   *
-   * @param startKey a start key.
-   * @param count max number of entries to return.
-   * @param filters customized one or more {@link MetadataKeyFilter}.
-   * @return a list of entries found in the database or an empty list if the
-   * startKey is invalid.
-   * @throws IOException if there are I/O errors.
-   * @throws IllegalArgumentException if count is less than 0.
-   */
-  List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException;
-
-  /**
-   * This method is very similar to {@link #getRangeKVs}; the only
-   * difference is that this method returns a sequential range
-   * of elements based on the filters. While iterating the elements,
-   * if it meets any entry that does not pass the filters, the iteration stops
-   * at that point without looking for the next match. If no filter is given,
-   * this method behaves just like {@link #getRangeKVs}.
-   *
-   * @param startKey a start key.
-   * @param count max number of entries to return.
-   * @param filters customized one or more {@link MetadataKeyFilter}.
-   * @return a list of entries found in the database.
-   * @throws IOException
-   * @throws IllegalArgumentException
-   */
-  List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException;
-
-  /**
-   * A batch of PUT, DELETE operations handled as a single atomic write.
-   *
-   * @throws IOException write fails
-   */
-  void writeBatch(BatchOperation operation) throws IOException;
-
-  /**
-   * Compact the entire database.
-   * @throws IOException
-   */
-  void compactDB() throws IOException;
-
-  /**
-   * Destroys the content of the specified database;
-   * a destroyed database cannot be loaded again.
-   * Be very careful with this method.
-   *
-   * @throws IOException if I/O error happens
-   */
-  void destroy() throws IOException;
-
-  /**
-   * Seek the database to a certain key, returns the key-value
-   * pairs around this key based on the given offset. Note, this method
-   * can only support offset -1 (left), 0 (current) and 1 (right),
-   * any other offset given will cause an {@link IllegalArgumentException}.
-   *
-   * @param offset offset to the key
-   * @param from from which key
-   * @return a key-value pair
-   * @throws IOException
-   */
-  ImmutablePair<byte[], byte[]> peekAround(int offset, byte[] from)
-      throws IOException, IllegalArgumentException;
-
-  /**
-   * Iterates entries in the database from a certain key.
-   * Applies the given {@link EntryConsumer} to the key and value of
-   * each entry; the function produces a boolean result which is used
-   * as the criterion to exit from the iteration.
-   *
-   * @param from the start key
-   * @param consumer
-   *   an {@link EntryConsumer} applied to each key and value. If the consumer
-   *   returns true, the iteration continues to the next entry; otherwise it exits
-   *   the iteration.
-   * @throws IOException
-   */
-  void iterate(byte[] from, EntryConsumer consumer)
-      throws IOException;
-
-  /**
-   * Returns the iterator for this metadata store.
-   * @return MetaStoreIterator
-   */
-  MetaStoreIterator<KeyValue> iterator();
-
-  /**
-   * Class used to represent the key and value pair of a db entry.
-   */
-  class KeyValue {
-
-    private final byte[] key;
-    private final byte[] value;
-
-    /**
-     * KeyValue Constructor, used to represent a key and value of a db entry.
-     * @param key
-     * @param value
-     */
-    private KeyValue(byte[] key, byte[] value) {
-      this.key = key;
-      this.value = value;
-    }
-
-    /**
-     * Return key.
-     * @return byte[]
-     */
-    public byte[] getKey() {
-      byte[] result = new byte[key.length];
-      System.arraycopy(key, 0, result, 0, key.length);
-      return result;
-    }
-
-    /**
-     * Return value.
-     * @return byte[]
-     */
-    public byte[] getValue() {
-      byte[] result = new byte[value.length];
-      System.arraycopy(value, 0, result, 0, value.length);
-      return result;
-    }
-
-    /**
-     * Create a KeyValue pair.
-     * @param key
-     * @param value
-     * @return KeyValue object.
-     */
-    public static KeyValue create(byte[] key, byte[] value) {
-      return new KeyValue(key, value);
-    }
-  }
-}
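Finally, a sketch of the batch-write and range-scan paths on this interface. It assumes BatchOperation exposes put()/delete() helpers, which are not shown in this hunk; the keys are illustrative.

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.utils.BatchOperation;
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.utils.MetadataStore;

// Illustrative only; assumes BatchOperation has put()/delete() helpers and
// 'store' is an already-open MetadataStore implementation.
public final class BatchAndRangeExample {
  static void run(MetadataStore store) throws Exception {
    BatchOperation batch = new BatchOperation();
    batch.put(DFSUtil.string2Bytes("k1"), DFSUtil.string2Bytes("v1"));
    batch.delete(DFSUtil.string2Bytes("k0"));
    store.writeBatch(batch);  // PUT and DELETE applied as one atomic write

    // At most 10 entries whose keys start with "k", scanning from the first key.
    List<Map.Entry<byte[], byte[]>> range =
        store.getRangeKVs(null, 10, new KeyPrefixFilter().addFilter("k"));
    System.out.println(range.size());
  }
}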

