http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
deleted file mode 100644
index cfdf025..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.utils;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.OzoneClientUtils;
-import org.apache.hadoop.ozone.client.io.LengthInputStream;
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.scm.ScmConfigKeys;
-import org.apache.hadoop.util.Time;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.MediaType;
-import java.io.File;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.charset.Charset;
-import java.nio.file.Paths;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.List;
-import java.util.Locale;
-import java.util.TimeZone;
-import java.util.UUID;
-
-/**
- * Set of Utility functions used in ozone.
- */
[email protected]
-public final class OzoneUtils {
-
-  public static final String ENCODING_NAME = "UTF-8";
-  public static final Charset ENCODING = Charset.forName(ENCODING_NAME);
-
-  private OzoneUtils() {
-    // Never constructed
-  }
-
-  /**
-   * Date format that used in ozone. Here the format is thread safe to use.
-   */
-  private static final ThreadLocal<SimpleDateFormat> DATE_FORMAT =
-      new ThreadLocal<SimpleDateFormat>() {
-    @Override
-    protected SimpleDateFormat initialValue() {
-      SimpleDateFormat format = new SimpleDateFormat(
-          OzoneConsts.OZONE_DATE_FORMAT, Locale.US);
-      format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE));
-
-      return format;
-    }
-  };
-
-  /**
-   * verifies that bucket name / volume name is a valid DNS name.
-   *
-   * @param resName Bucket or volume Name to be validated
-   *
-   * @throws IllegalArgumentException
-   */
-  public static void verifyResourceName(String resName)
-      throws IllegalArgumentException {
-    OzoneClientUtils.verifyResourceName(resName);
-  }
-
-  /**
-   * Verifies that max key length is a valid value.
-   *
-   * @param length
-   *          The max key length to be validated
-   *
-   * @throws IllegalArgumentException
-   */
-  public static void verifyMaxKeyLength(String length)
-      throws IllegalArgumentException {
-    int maxKey = 0;
-    try {
-      maxKey = Integer.parseInt(length);
-    } catch (NumberFormatException nfe) {
-      throw new IllegalArgumentException(
-          "Invalid max key length, the vaule should be digital.");
-    }
-
-    if (maxKey <= 0) {
-      throw new IllegalArgumentException(
-          "Invalid max key length, the vaule should be a positive number.");
-    }
-  }
-
-  /**
-   * Returns a random Request ID.
-   *
-   * Request ID is returned to the client as well as flows through the system
-   * facilitating debugging on why a certain request failed.
-   *
-   * @return String random request ID
-   */
-  public static String getRequestID() {
-    return UUID.randomUUID().toString();
-  }
-
-  /**
-   * Return host name if possible.
-   *
-   * @return Host Name or localhost
-   */
-  public static String getHostName() {
-    String host = "localhost";
-    try {
-      host = InetAddress.getLocalHost().getHostName();
-    } catch (UnknownHostException e) {
-      // Ignore the error
-    }
-    return host;
-  }
-
-  /**
-   * Basic validate routine to make sure that all the
-   * required headers are in place.
-   *
-   * @param request - http request
-   * @param headers - http headers
-   * @param reqId - request id
-   * @param resource - Resource Name
-   * @param hostname - Hostname
-   *
-   * @throws OzoneException
-   */
-  public static void validate(Request request, HttpHeaders headers,
-                              String reqId, String resource, String hostname)
-      throws OzoneException {
-
-    List<String> ozHeader =
-        headers.getRequestHeader(Header.OZONE_VERSION_HEADER);
-    if (ozHeader == null) {
-      throw ErrorTable
-          .newError(ErrorTable.MISSING_VERSION, reqId, resource, hostname);
-    }
-
-    List<String> date = headers.getRequestHeader(HttpHeaders.DATE);
-    if (date == null) {
-      throw ErrorTable
-          .newError(ErrorTable.MISSING_DATE, reqId, resource, hostname);
-    }
-
-    /*
-    TODO :
-    Ignore the results for time being. Eventually we can validate if the
-    request Date time is too skewed and reject if it is so.
-    */
-    parseDate(date.get(0), reqId, resource, hostname);
-
-  }
-
-  /**
-   * Parses the Date String coming from the Users.
-   *
-   * @param dateString - Date String
-   * @param reqID - Ozone Request ID
-   * @param resource - Resource Name
-   * @param hostname - HostName
-   *
-   * @return - Date
-   *
-   * @throws OzoneException - in case of parsing error
-   */
-  public static synchronized Date parseDate(String dateString, String reqID,
-                                            String resource, String hostname)
-      throws OzoneException {
-    try {
-      return DATE_FORMAT.get().parse(dateString);
-    } catch (ParseException ex) {
-      OzoneException exp =
-          ErrorTable.newError(ErrorTable.BAD_DATE, reqID, resource, hostname);
-      exp.setMessage(ex.getMessage());
-      throw exp;
-    }
-  }
-
-  /**
-   * Returns a response with appropriate OZONE headers and payload.
-   *
-   * @param args - UserArgs or Inherited class
-   * @param statusCode - HttpStatus code
-   * @param payload - Content Body
-   *
-   * @return JAX-RS Response
-   */
-  public static Response getResponse(UserArgs args, int statusCode,
-                                     String payload) {
-    String date = DATE_FORMAT.get().format(new Date(Time.now()));
-    return Response.ok(payload)
-        .header(Header.OZONE_SERVER_NAME, args.getHostName())
-        .header(Header.OZONE_REQUEST_ID, args.getRequestID())
-        .header(HttpHeaders.DATE, date).status(statusCode).build();
-  }
-
-  /**
-   * Returns a response with appropriate OZONE headers and payload.
-   *
-   * @param args - UserArgs or Inherited class
-   * @param statusCode - HttpStatus code
-   * @param stream InputStream
-   *
-   * @return JAX-RS Response
-   */
-  public static Response getResponse(UserArgs args, int statusCode,
-                                     LengthInputStream stream) {
-    String date = DATE_FORMAT.get().format(new Date(Time.now()));
-    return Response.ok(stream, MediaType.APPLICATION_OCTET_STREAM)
-        .header(Header.OZONE_SERVER_NAME, args.getHostName())
-        .header(Header.OZONE_REQUEST_ID, args.getRequestID())
-        .header(HttpHeaders.DATE, date).status(statusCode)
-        .header(HttpHeaders.CONTENT_LENGTH, stream.getLength())
-        .build();
-
-  }
-
-  /**
-   * Checks and creates Ozone Metadir Path if it does not exist.
-   *
-   * @param conf - Configuration
-   *
-   * @return File MetaDir
-   */
-  public static File getOzoneMetaDirPath(Configuration conf) {
-    String metaDirPath = conf.getTrimmed(OzoneConfigKeys
-        .OZONE_METADATA_DIRS);
-    Preconditions.checkNotNull(metaDirPath);
-    File dirPath = new File(metaDirPath);
-    if (!dirPath.exists() && !dirPath.mkdirs()) {
-      throw new IllegalArgumentException("Unable to create paths. Path: " +
-          dirPath);
-    }
-    return dirPath;
-  }
-
-  /**
-   * Get the path for datanode id file.
-   *
-   * @param conf - Configuration
-   * @return the path of datanode id as string
-   */
-  public static String getDatanodeIDPath(Configuration conf) {
-    String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
-    if (dataNodeIDPath == null) {
-      String metaPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
-      if (Strings.isNullOrEmpty(metaPath)) {
-        // this means meta data is not found, in theory should not happen at
-        // this point because should've failed earlier.
-        throw new IllegalArgumentException("Unable to locate meta data" +
-            "directory when getting datanode id path");
-      }
-      dataNodeIDPath = Paths.get(metaPath,
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID_PATH_DEFAULT).toString();
-    }
-    return dataNodeIDPath;
-  }
-
-  /**
-   * Convert time in millisecond to a human readable format required in ozone.
-   * @return a human readable string for the input time
-   */
-  public static String formatTime(long millis) {
-    return DATE_FORMAT.get().format(millis);
-  }
-
-  /**
-   * Convert time in ozone date format to millisecond.
-   * @return time in milliseconds
-   */
-  public static long formatDate(String date) throws ParseException {
-    Preconditions.checkNotNull(date, "Date string should not be null.");
-    return DATE_FORMAT.get().parse(date).getTime();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
deleted file mode 100644
index 178157f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.web.utils;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundService.java
deleted file mode 100644
index 2ff4e55..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundService.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.utils;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.Future;
-import java.util.concurrent.ExecutionException;
-
-/**
- * An abstract class for a background service in ozone.
- * A background service schedules multiple child tasks in parallel
- * in a certain period. In each interval, it waits until all the tasks
- * finish execution and then schedule next interval.
- */
-public abstract class BackgroundService {
-
-  @VisibleForTesting
-  public static final Logger LOG =
-      LoggerFactory.getLogger(BackgroundService.class);
-
-  // Executor to launch child tasks
-  private final ScheduledExecutorService exec;
-  private final ThreadGroup threadGroup;
-  private final ThreadFactory threadFactory;
-  private final String serviceName;
-  private final long interval;
-  private final long serviceTimeout;
-  private final TimeUnit unit;
-  private final PeriodicalTask service;
-
-  public BackgroundService(String serviceName, long interval,
-      TimeUnit unit, int threadPoolSize, long serviceTimeout) {
-    this.interval = interval;
-    this.unit = unit;
-    this.serviceName = serviceName;
-    this.serviceTimeout = serviceTimeout;
-    threadGroup = new ThreadGroup(serviceName);
-    ThreadFactory tf = r -> new Thread(threadGroup, r);
-    threadFactory = new ThreadFactoryBuilder()
-        .setThreadFactory(tf)
-        .setDaemon(true)
-        .setNameFormat(serviceName + "#%d")
-        .build();
-    exec = Executors.newScheduledThreadPool(threadPoolSize, threadFactory);
-    service = new PeriodicalTask();
-  }
-
-  protected ExecutorService getExecutorService() {
-    return this.exec;
-  }
-
-  @VisibleForTesting
-  public int getThreadCount() {
-    return threadGroup.activeCount();
-  }
-
-  @VisibleForTesting
-  public void triggerBackgroundTaskForTesting() {
-    service.run();
-  }
-
-  // start service
-  public void start() {
-    exec.scheduleWithFixedDelay(service, 0, interval, unit);
-  }
-
-  public abstract BackgroundTaskQueue getTasks();
-
-  /**
-   * Run one or more background tasks concurrently.
-   * Wait until all tasks to return the result.
-   */
-  public class PeriodicalTask implements Runnable {
-    @Override
-    public synchronized void run() {
-      LOG.debug("Running background service : {}", serviceName);
-      BackgroundTaskQueue tasks = getTasks();
-      if (tasks.isEmpty()) {
-        // No task found, or some problems to init tasks
-        // return and retry in next interval.
-        return;
-      }
-
-      LOG.debug("Number of background tasks to execute : {}", tasks.size());
-      CompletionService<BackgroundTaskResult> taskCompletionService =
-          new ExecutorCompletionService<>(exec);
-
-      List<Future<BackgroundTaskResult>> results = Lists.newArrayList();
-      while (tasks.size() > 0) {
-        BackgroundTask task = tasks.poll();
-        Future<BackgroundTaskResult> result =
-            taskCompletionService.submit(task);
-        results.add(result);
-      }
-
-      results.parallelStream().forEach(taskResultFuture -> {
-        try {
-          // Collect task results
-          BackgroundTaskResult result = serviceTimeout > 0
-              ? taskResultFuture.get(serviceTimeout, TimeUnit.MILLISECONDS)
-              : taskResultFuture.get();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("task execution result size {}", result.getSize());
-          }
-        } catch (InterruptedException | ExecutionException e) {
-          LOG.warn(
-              "Background task fails to execute, "
-                  + "retrying in next interval", e);
-        } catch (TimeoutException e) {
-          LOG.warn("Background task executes timed out, "
-              + "retrying in next interval", e);
-        }
-      });
-    }
-  }
-
-  // shutdown and make sure all threads are properly released.
-  public void shutdown() {
-    LOG.info("Shutting down service {}", this.serviceName);
-    exec.shutdown();
-    try {
-      if (!exec.awaitTermination(60, TimeUnit.SECONDS)) {
-        exec.shutdownNow();
-      }
-    } catch (InterruptedException e) {
-      exec.shutdownNow();
-    }
-    if (threadGroup.activeCount() == 0 && !threadGroup.isDestroyed()) {
-      threadGroup.destroy();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTask.java
deleted file mode 100644
index 47e8ebc..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTask.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.utils;
-
-import java.util.concurrent.Callable;
-
-/**
- * A task thread to run by {@link BackgroundService}.
- */
-public interface BackgroundTask<T> extends Callable<T> {
-
-  int getPriority();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java
deleted file mode 100644
index b56ef0c..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.utils;
-
-import java.util.PriorityQueue;
-
-/**
- * A priority queue that stores a number of {@link BackgroundTask}.
- */
-public class BackgroundTaskQueue {
-
-  private final PriorityQueue<BackgroundTask> tasks;
-
-  public BackgroundTaskQueue() {
-    tasks = new PriorityQueue<>((task1, task2)
-        -> task1.getPriority() - task2.getPriority());
-  }
-
-  /**
-   * @return the head task in this queue.
-   */
-  public synchronized BackgroundTask poll() {
-    return tasks.poll();
-  }
-
-  /**
-   * Add a {@link BackgroundTask} to the queue,
-   * the task will be sorted by its priority.
-   *
-   * @param task
-   */
-  public synchronized void add(BackgroundTask task) {
-    tasks.add(task);
-  }
-
-  /**
-   * @return true if the queue contains no task, false otherwise.
-   */
-  public synchronized boolean isEmpty() {
-    return tasks.isEmpty();
-  }
-
-  /**
-   * @return the size of the queue.
-   */
-  public synchronized int size() {
-    return tasks.size();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java
deleted file mode 100644
index 198300f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.utils;
-
-/**
- * Result of a {@link BackgroundTask}.
- */
-public interface BackgroundTaskResult {
-
-  /**
-   * Returns the size of entries included in this result.
-   */
-  int getSize();
-
-  /**
-   * An empty task result implementation.
-   */
-  class EmptyTaskResult implements BackgroundTaskResult {
-
-    public static EmptyTaskResult newResult() {
-      return new EmptyTaskResult();
-    }
-
-    @Override
-    public int getSize() {
-      return 0;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BatchOperation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BatchOperation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BatchOperation.java
deleted file mode 100644
index 47699eb..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/BatchOperation.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import com.google.common.collect.Lists;
-
-import java.util.List;
-
-/**
- * An utility class to store a batch of DB write operations.
- */
-public class BatchOperation {
-
-  /**
-   * Enum for write operations.
-   */
-  public enum Operation {
-    DELETE, PUT
-  }
-
-  private List<SingleOperation> operations =
-      Lists.newArrayList();
-
-  /**
-   * Add a PUT operation into the batch.
-   */
-  public void put(byte[] key, byte[] value) {
-    operations.add(new SingleOperation(Operation.PUT, key, value));
-  }
-
-  /**
-   * Add a DELETE operation into the batch.
-   */
-  public void delete(byte[] key) {
-    operations.add(new SingleOperation(Operation.DELETE, key, null));
-
-  }
-
-  public List<SingleOperation> getOperations() {
-    return operations;
-  }
-
-  /**
-   * A SingleOperation represents a PUT or DELETE operation
-   * and the data the operation needs to manipulates.
-   */
-  public static class SingleOperation {
-
-    private Operation opt;
-    private byte[] key;
-    private byte[] value;
-
-    public SingleOperation(Operation opt, byte[] key, byte[] value) {
-      this.opt = opt;
-      if (key == null) {
-        throw new IllegalArgumentException("key cannot be null");
-      }
-      this.key = key.clone();
-      this.value = value == null ? null : value.clone();
-    }
-
-    public Operation getOpt() {
-      return opt;
-    }
-
-    public byte[] getKey() {
-      return key.clone();
-    }
-
-    public byte[] getValue() {
-      return value == null ? null : value.clone();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/EntryConsumer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/EntryConsumer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/EntryConsumer.java
deleted file mode 100644
index c407398..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/EntryConsumer.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import java.io.IOException;
-
-/**
- * A consumer for metadata store key-value entries.
- * Used by {@link MetadataStore} class.
- */
-@FunctionalInterface
-public interface EntryConsumer {
-
-  /**
-   * Consumes a key and value and produces a boolean result.
-   * @param key key
-   * @param value value
-   * @return a boolean value produced by the consumer
-   * @throws IOException
-   */
-  boolean consume(byte[] key, byte[] value) throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/LevelDBStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/LevelDBStore.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/LevelDBStore.java
deleted file mode 100644
index 38cee76..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/LevelDBStore.java
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
-import org.fusesource.leveldbjni.JniDBFactory;
-import org.iq80.leveldb.WriteBatch;
-import org.iq80.leveldb.DB;
-import org.iq80.leveldb.Options;
-import org.iq80.leveldb.WriteOptions;
-import org.iq80.leveldb.DBIterator;
-import org.iq80.leveldb.Snapshot;
-import org.iq80.leveldb.ReadOptions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.Map.Entry;
-
-/**
- * LevelDB interface.
- */
-public class LevelDBStore implements MetadataStore {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(LevelDBStore.class);
-
-  private DB db;
-  private final File dbFile;
-  private final Options dbOptions;
-  private final WriteOptions writeOptions;
-
-  public LevelDBStore(File dbPath, boolean createIfMissing)
-      throws IOException {
-    dbOptions = new Options();
-    dbOptions.createIfMissing(createIfMissing);
-    this.dbFile = dbPath;
-    this.writeOptions = new WriteOptions().sync(true);
-    openDB(dbPath, dbOptions);
-  }
-
-  /**
-   * Opens a DB file.
-   *
-   * @param dbPath          - DB File path
-   * @throws IOException
-   */
-  public LevelDBStore(File dbPath, Options options)
-      throws IOException {
-    dbOptions = options;
-    this.dbFile = dbPath;
-    this.writeOptions = new WriteOptions().sync(true);
-    openDB(dbPath, dbOptions);
-  }
-
-  private void openDB(File dbPath, Options options) throws IOException {
-    db = JniDBFactory.factory.open(dbPath, options);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("LevelDB successfully opened");
-      LOG.debug("[Option] cacheSize = " + options.cacheSize());
-      LOG.debug("[Option] createIfMissing = " + options.createIfMissing());
-      LOG.debug("[Option] blockSize = " + options.blockSize());
-      LOG.debug("[Option] compressionType= " + options.compressionType());
-      LOG.debug("[Option] maxOpenFiles= " + options.maxOpenFiles());
-      LOG.debug("[Option] writeBufferSize= "+ options.writeBufferSize());
-    }
-  }
-
-  /**
-   * Puts a Key into file.
-   *
-   * @param key   - key
-   * @param value - value
-   */
-  @Override
-  public void put(byte[] key, byte[] value) {
-    db.put(key, value, writeOptions);
-  }
-
-  /**
-   * Get Key.
-   *
-   * @param key key
-   * @return value
-   */
-  @Override
-  public byte[] get(byte[] key) {
-    return db.get(key);
-  }
-
-  /**
-   * Delete Key.
-   *
-   * @param key - Key
-   */
-  @Override
-  public void delete(byte[] key) {
-    db.delete(key);
-  }
-
-  /**
-   * Closes the DB.
-   *
-   * @throws IOException
-   */
-  @Override
-  public void close() throws IOException {
-    if (db != null){
-      db.close();
-    }
-  }
-
-  /**
-   * Returns true if the DB is empty.
-   *
-   * @return boolean
-   * @throws IOException
-   */
-  @Override
-  public boolean isEmpty() throws IOException {
-    try (DBIterator iter = db.iterator()) {
-      iter.seekToFirst();
-      boolean hasNext = !iter.hasNext();
-      return hasNext;
-    }
-  }
-
-  /**
-   * Returns the actual levelDB object.
-   * @return DB handle.
-   */
-  public DB getDB() {
-    return db;
-  }
-
-  /**
-   * Returns an iterator on all the key-value pairs in the DB.
-   * @return an iterator on DB entries.
-   */
-  public DBIterator getIterator() {
-    return db.iterator();
-  }
-
-
-  @Override
-  public void destroy() throws IOException {
-    close();
-    JniDBFactory.factory.destroy(dbFile, dbOptions);
-  }
-
-  @Override
-  public ImmutablePair<byte[], byte[]> peekAround(int offset,
-      byte[] from) throws IOException, IllegalArgumentException {
-    try (DBIterator it = db.iterator()) {
-      if (from == null) {
-        it.seekToFirst();
-      } else {
-        it.seek(from);
-      }
-      if (!it.hasNext()) {
-        return null;
-      }
-      switch (offset) {
-      case 0:
-        Entry<byte[], byte[]> current = it.next();
-        return new ImmutablePair<>(current.getKey(), current.getValue());
-      case 1:
-        if (it.next() != null && it.hasNext()) {
-          Entry<byte[], byte[]> next = it.peekNext();
-          return new ImmutablePair<>(next.getKey(), next.getValue());
-        }
-        break;
-      case -1:
-        if (it.hasPrev()) {
-          Entry<byte[], byte[]> prev = it.peekPrev();
-          return new ImmutablePair<>(prev.getKey(), prev.getValue());
-        }
-        break;
-      default:
-        throw new IllegalArgumentException(
-            "Position can only be -1, 0 " + "or 1, but found " + offset);
-      }
-    }
-    return null;
-  }
-
-  @Override
-  public void iterate(byte[] from, EntryConsumer consumer)
-      throws IOException {
-    try (DBIterator iter = db.iterator()) {
-      if (from != null) {
-        iter.seek(from);
-      } else {
-        iter.seekToFirst();
-      }
-      while (iter.hasNext()) {
-        Entry<byte[], byte[]> current = iter.next();
-        if (!consumer.consume(current.getKey(),
-            current.getValue())) {
-          break;
-        }
-      }
-    }
-  }
-
-  /**
-   * Compacts the DB by removing deleted keys etc.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public void compactDB() throws IOException {
-    if(db != null) {
-      // From LevelDB docs : begin == null and end == null means the whole DB.
-      db.compactRange(null, null);
-    }
-  }
-
-  @Override
-  public void writeBatch(BatchOperation operation) throws IOException {
-    List<BatchOperation.SingleOperation> operations =
-        operation.getOperations();
-    if (!operations.isEmpty()) {
-      try (WriteBatch writeBatch = db.createWriteBatch()) {
-        for (BatchOperation.SingleOperation opt : operations) {
-          switch (opt.getOpt()) {
-          case DELETE:
-            writeBatch.delete(opt.getKey());
-            break;
-          case PUT:
-            writeBatch.put(opt.getKey(), opt.getValue());
-            break;
-          default:
-            throw new IllegalArgumentException("Invalid operation "
-                + opt.getOpt());
-          }
-        }
-        db.write(writeBatch);
-      }
-    }
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, false, filters);
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, true, filters);
-  }
-
-  /**
-   * Returns a certain range of key value pairs as a list based on a
-   * startKey or count. Further a {@link MetadataKeyFilter} can be added to
-   * filter keys if necessary. To prevent race conditions while listing
-   * entries, this implementation takes a snapshot and lists the entries from
-   * the snapshot. This may, on the other hand, cause the range result slight
-   * different with actual data if data is updating concurrently.
-   * <p>
-   * If the startKey is specified and found in levelDB, this key and the keys
-   * after this key will be included in the result. If the startKey is null
-   * all entries will be included as long as other conditions are satisfied.
-   * If the given startKey doesn't exist, an empty list will be returned.
-   * <p>
-   * The count argument is to limit number of total entries to return,
-   * the value for count must be an integer greater than 0.
-   * <p>
-   * This method allows to specify one or more {@link MetadataKeyFilter}
-   * to filter keys by certain condition. Once given, only the entries
-   * whose key passes all the filters will be included in the result.
-   *
-   * @param startKey a start key.
-   * @param count max number of entries to return.
-   * @param filters customized one or more {@link MetadataKeyFilter}.
-   * @return a list of entries found in the database or an empty list if the
-   * startKey is invalid.
-   * @throws IOException if there are I/O errors.
-   * @throws IllegalArgumentException if count is less than 0.
-   */
-  private List<Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, boolean sequential, MetadataKeyFilter... filters)
-      throws IOException {
-    List<Entry<byte[], byte[]>> result = new ArrayList<>();
-    long start = System.currentTimeMillis();
-    if (count < 0) {
-      throw new IllegalArgumentException(
-          "Invalid count given " + count + ", count must be greater than 0");
-    }
-    Snapshot snapShot = null;
-    DBIterator dbIter = null;
-    try {
-      snapShot = db.getSnapshot();
-      ReadOptions readOptions = new ReadOptions().snapshot(snapShot);
-      dbIter = db.iterator(readOptions);
-      if (startKey == null) {
-        dbIter.seekToFirst();
-      } else {
-        if (db.get(startKey) == null) {
-          // Key not found, return empty list
-          return result;
-        }
-        dbIter.seek(startKey);
-      }
-      while (dbIter.hasNext() && result.size() < count) {
-        byte[] preKey = dbIter.hasPrev() ? dbIter.peekPrev().getKey() : null;
-        byte[] nextKey = dbIter.hasNext() ? dbIter.peekNext().getKey() : null;
-        Entry<byte[], byte[]> current = dbIter.next();
-
-        if (filters == null) {
-          result.add(current);
-        } else {
-          if (Arrays.asList(filters).stream().allMatch(
-              entry -> entry.filterKey(preKey, current.getKey(), nextKey))) {
-            result.add(current);
-          } else {
-            if (result.size() > 0 && sequential) {
-              // if the caller asks for a sequential range of results,
-              // and we met a dis-match, abort iteration from here.
-              // if result is empty, we continue to look for the first match.
-              break;
-            }
-          }
-        }
-      }
-    } finally {
-      if (snapShot != null) {
-        snapShot.close();
-      }
-      if (dbIter != null) {
-        dbIter.close();
-      }
-      if (LOG.isDebugEnabled()) {
-        if (filters != null) {
-          for (MetadataKeyFilters.MetadataKeyFilter filter : filters) {
-            int scanned = filter.getKeysScannedNum();
-            int hinted = filter.getKeysHintedNum();
-            if (scanned > 0 || hinted > 0) {
-              LOG.debug(
-                  "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}",
-                  filter.getClass().getSimpleName(), filter.getKeysScannedNum(),
-                  filter.getKeysHintedNum());
-            }
-          }
-        }
-        long end = System.currentTimeMillis();
-        long timeConsumed = end - start;
-        LOG.debug("Time consumed for getRangeKVs() is {}ms,"
-            + " result length is {}.", timeConsumed, result.size());
-      }
-    }
-    return result;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
deleted file mode 100644
index 3ff0a94..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.utils;
-
-import com.google.common.base.Strings;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-/**
- * An utility class to filter levelDB keys.
- */
-public final class MetadataKeyFilters {
-
-  private static KeyPrefixFilter deletingKeyFilter =
-      new MetadataKeyFilters.KeyPrefixFilter(OzoneConsts.DELETING_KEY_PREFIX);
-
-  private static KeyPrefixFilter normalKeyFilter =
-      new MetadataKeyFilters.KeyPrefixFilter(OzoneConsts.DELETING_KEY_PREFIX,
-          true);
-
-  private MetadataKeyFilters() {
-  }
-
-  public static KeyPrefixFilter getDeletingKeyFilter() {
-    return deletingKeyFilter;
-  }
-
-  public static KeyPrefixFilter getNormalKeyFilter() {
-    return normalKeyFilter;
-  }
-  /**
-   * Interface for levelDB key filters.
-   */
-  public interface MetadataKeyFilter {
-    /**
-     * Filter levelDB key with a certain condition.
-     *
-     * @param preKey     previous key.
-     * @param currentKey current key.
-     * @param nextKey    next key.
-     * @return true if a certain condition satisfied, return false otherwise.
-     */
-    boolean filterKey(byte[] preKey, byte[] currentKey, byte[] nextKey);
-
-    default int getKeysScannedNum() {
-      return 0;
-    }
-
-    default int getKeysHintedNum() {
-      return 0;
-    }
-  }
-
-  /**
-   * Utility class to filter key by a string prefix. This filter
-   * assumes keys can be parsed to a string.
-   */
-  public static class KeyPrefixFilter implements MetadataKeyFilter {
-
-    private String keyPrefix = null;
-    private int keysScanned = 0;
-    private int keysHinted = 0;
-    private Boolean negative;
-
-    public KeyPrefixFilter(String keyPrefix) {
-      this(keyPrefix, false);
-    }
-
-    public KeyPrefixFilter(String keyPrefix, boolean negative) {
-      this.keyPrefix = keyPrefix;
-      this.negative = negative;
-    }
-
-    @Override
-    public boolean filterKey(byte[] preKey, byte[] currentKey,
-        byte[] nextKey) {
-      keysScanned++;
-      boolean accept = false;
-      if (Strings.isNullOrEmpty(keyPrefix)) {
-        accept = true;
-      } else {
-        if (currentKey != null &&
-            DFSUtil.bytes2String(currentKey).startsWith(keyPrefix)) {
-          keysHinted++;
-          accept = true;
-        } else {
-          accept = false;
-        }
-      }
-      return (negative) ? !accept : accept;
-    }
-
-    @Override
-    public int getKeysScannedNum() {
-      return keysScanned;
-    }
-
-    @Override
-    public int getKeysHintedNum() {
-      return keysHinted;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataStore.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataStore.java
deleted file mode 100644
index b90b08f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataStore.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Interface for key-value store that stores ozone metadata.
- * Ozone metadata is stored as key value pairs, both key and value
- * are arbitrary byte arrays.
- */
-@InterfaceStability.Evolving
-public interface MetadataStore extends Closeable{
-
-  /**
-   * Puts a key-value pair into the store.
-   *
-   * @param key metadata key
-   * @param value metadata value
-   */
-  void put(byte[] key, byte[] value) throws IOException;
-
-  /**
-   * @return true if the metadata store is empty.
-   *
-   * @throws IOException
-   */
-  boolean isEmpty() throws IOException;
-
-  /**
-   * Returns the value mapped to the given key in byte array.
-   *
-   * @param key metadata key
-   * @return value in byte array
-   * @throws IOException
-   */
-  byte[] get(byte[] key) throws IOException;
-
-  /**
-   * Deletes a key from the metadata store.
-   *
-   * @param key metadata key
-   * @throws IOException
-   */
-  void delete(byte[] key) throws IOException;
-
-  /**
-   * Returns a certain range of key value pairs as a list based on a
-   * startKey or count. Further a {@link MetadataKeyFilter} can be added to
-   * filter keys if necessary. To prevent race conditions while listing
-   * entries, this implementation takes a snapshot and lists the entries from
-   * the snapshot. This may, on the other hand, cause the range result slight
-   * different with actual data if data is updating concurrently.
-   * <p>
-   * If the startKey is specified and found in levelDB, this key and the keys
-   * after this key will be included in the result. If the startKey is null
-   * all entries will be included as long as other conditions are satisfied.
-   * If the given startKey doesn't exist and empty list will be returned.
-   * <p>
-   * The count argument is to limit number of total entries to return,
-   * the value for count must be an integer greater than 0.
-   * <p>
-   * This method allows to specify one or more {@link MetadataKeyFilter}
-   * to filter keys by certain condition. Once given, only the entries
-   * whose key passes all the filters will be included in the result.
-   *
-   * @param startKey a start key.
-   * @param count max number of entries to return.
-   * @param filters customized one or more {@link MetadataKeyFilter}.
-   * @return a list of entries found in the database or an empty list if the
-   * startKey is invalid.
-   * @throws IOException if there are I/O errors.
-   * @throws IllegalArgumentException if count is less than 0.
-   */
-  List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException;
-
-  /**
-   * This method is very similar to {@link #getRangeKVs}, the only
-   * different is this method is supposed to return a sequential range
-   * of elements based on the filters. While iterating the elements,
-   * if it met any entry that cannot pass the filter, the iterator will stop
-   * from this point without looking for next match. If no filter is given,
-   * this method behaves just like {@link #getRangeKVs}.
-   *
-   * @param startKey a start key.
-   * @param count max number of entries to return.
-   * @param filters customized one or more {@link MetadataKeyFilter}.
-   * @return a list of entries found in the database.
-   * @throws IOException
-   * @throws IllegalArgumentException
-   */
-  List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException;
-
-  /**
-   * A batch of PUT, DELETE operations handled as a single atomic write.
-   *
-   * @throws IOException write fails
-   */
-  void writeBatch(BatchOperation operation) throws IOException;
-
-  /**
-   * Compact the entire database.
-   * @throws IOException
-   */
-  void compactDB() throws IOException;
-
-  /**
-   * Destroy the content of the specified database,
-   * a destroyed database will not be able to load again.
-   * Be very careful with this method.
-   *
-   * @throws IOException if I/O error happens
-   */
-  void destroy() throws IOException;
-
-  /**
-   * Seek the database to a certain key, returns the key-value
-   * pairs around this key based on the given offset. Note, this method
-   * can only support offset -1 (left), 0 (current) and 1 (right),
-   * any other offset given will cause a {@link IllegalArgumentException}.
-   *
-   * @param offset offset to the key
-   * @param from from which key
-   * @return a key-value pair
-   * @throws IOException
-   */
-  ImmutablePair<byte[], byte[]> peekAround(int offset, byte[] from)
-      throws IOException, IllegalArgumentException;
-
-  /**
-   * Iterates entries in the database from a certain key.
-   * Applies the given {@link EntryConsumer} to the key and value of
-   * each entry, the function produces a boolean result which is used
-   * as the criteria to exit from iteration.
-   *
-   * @param from the start key
-   * @param consumer
-   *   a {@link EntryConsumer} applied to each key and value. If the consumer
-   *   returns true, continues the iteration to next entry; otherwise exits
-   *   the iteration.
-   * @throws IOException
-   */
-  void iterate(byte[] from, EntryConsumer consumer)
-      throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
deleted file mode 100644
index 095e718..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
-import org.iq80.leveldb.Options;
-import org.rocksdb.BlockBasedTableConfig;
-
-import java.io.File;
-import java.io.IOException;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
-
-import org.rocksdb.Statistics;
-import org.rocksdb.StatsLevel;
-
-/**
- * Builder for metadata store.
- */
-public class MetadataStoreBuilder {
-
-  private File dbFile;
-  private long cacheSize;
-  private boolean createIfMissing = true;
-  private Configuration conf;
-
-  public static MetadataStoreBuilder newBuilder() {
-    return new MetadataStoreBuilder();
-  }
-
-  public MetadataStoreBuilder setDbFile(File dbPath) {
-    this.dbFile = dbPath;
-    return this;
-  }
-
-  public MetadataStoreBuilder setCacheSize(long cache) {
-    this.cacheSize = cache;
-    return this;
-  }
-
-  public MetadataStoreBuilder setCreateIfMissing(boolean doCreate) {
-    this.createIfMissing = doCreate;
-    return this;
-  }
-
-  public MetadataStoreBuilder setConf(Configuration configuration) {
-    this.conf = configuration;
-    return this;
-  }
-
-  public MetadataStore build() throws IOException {
-    if (dbFile == null) {
-      throw new IllegalArgumentException("Failed to build metadata store, "
-          + "dbFile is required but not found");
-    }
-
-    // Build db store based on configuration
-    MetadataStore store = null;
-    String impl = conf == null ?
-        OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT :
-        conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
-            OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
-    if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(impl)) {
-      Options options = new Options();
-      options.createIfMissing(createIfMissing);
-      if (cacheSize > 0) {
-        options.cacheSize(cacheSize);
-      }
-      store = new LevelDBStore(dbFile, options);
-    } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(impl)) {
-      org.rocksdb.Options opts = new org.rocksdb.Options();
-      opts.setCreateIfMissing(createIfMissing);
-
-      if (cacheSize > 0) {
-        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
-        tableConfig.setBlockCacheSize(cacheSize);
-        opts.setTableFormatConfig(tableConfig);
-      }
-
-      String rocksDbStat = conf == null ?
-          OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT :
-          conf.getTrimmed(OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
-              OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
-
-      if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
-        Statistics statistics = new Statistics();
-        statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
-        opts = opts.setStatistics(statistics);
-
-      }
-      store = new RocksDBStore(dbFile, opts);
-    } else {
-      throw new IllegalArgumentException("Invalid argument for "
-          + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL
-          + ". Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB
-          + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB
-          + ", but met " + impl);
-    }
-    return store;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStore.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
deleted file mode 100644
index 2f340a5..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting;
-import org.rocksdb.RocksIterator;
-import org.rocksdb.Options;
-import org.rocksdb.WriteOptions;
-import org.rocksdb.RocksDB;
-import org.rocksdb.RocksDBException;
-import org.rocksdb.WriteBatch;
-import org.rocksdb.DbPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.AbstractMap;
-
-/**
- * RocksDB implementation of ozone metadata store.
- */
-public class RocksDBStore implements MetadataStore {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RocksDBStore.class);
-
-  private RocksDB db = null;
-  private File dbLocation;
-  private WriteOptions writeOptions;
-  private Options dbOptions;
-  private ObjectName statMBeanName;
-
-  public RocksDBStore(File dbFile, Options options)
-      throws IOException {
-    Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
-    RocksDB.loadLibrary();
-    dbOptions = options;
-    dbLocation = dbFile;
-    writeOptions = new WriteOptions();
-    try {
-
-      db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath());
-      if (dbOptions.statistics() != null) {
-
-        Map<String, String> jmxProperties = new HashMap<String, String>();
-        jmxProperties.put("dbName", dbFile.getName());
-        statMBeanName = MBeans.register("Ozone", "RocksDbStore", jmxProperties,
-            new RocksDBStoreMBean(dbOptions.statistics()));
-        if (statMBeanName == null) {
-          LOG.warn("jmx registration failed during RocksDB init, db path :{}",
-              dbFile.getAbsolutePath());
-        }
-      }
-    } catch (RocksDBException e) {
-      throw new IOException(
-          "Failed init RocksDB, db path : " + dbFile.getAbsolutePath(), e);
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("RocksDB successfully opened.");
-      LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath());
-      LOG.debug("[Option] createIfMissing = {}", options.createIfMissing());
-      LOG.debug("[Option] compactionPriority= {}", options.compactionStyle());
-      LOG.debug("[Option] compressionType= {}", options.compressionType());
-      LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles());
-      LOG.debug("[Option] writeBufferSize= {}", options.writeBufferSize());
-    }
-  }
-
-  private IOException toIOException(String msg, RocksDBException e) {
-    String statusCode = e.getStatus() == null ? "N/A" :
-        e.getStatus().getCodeString();
-    String errMessage = e.getMessage() == null ? "Unknown error" :
-        e.getMessage();
-    String output = msg + "; status : " + statusCode
-        + "; message : " + errMessage;
-    return new IOException(output, e);
-  }
-
-  @Override
-  public void put(byte[] key, byte[] value) throws IOException {
-    try {
-      db.put(writeOptions, key, value);
-    } catch (RocksDBException e) {
-      throw toIOException("Failed to put key-value to metadata store", e);
-    }
-  }
-
-  @Override
-  public boolean isEmpty() throws IOException {
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      it.seekToFirst();
-      return !it.isValid();
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-    }
-  }
-
-  @Override
-  public byte[] get(byte[] key) throws IOException {
-    try {
-      return db.get(key);
-    } catch (RocksDBException e) {
-      throw toIOException("Failed to get the value for the given key", e);
-    }
-  }
-
-  @Override
-  public void delete(byte[] key) throws IOException {
-    try {
-      db.delete(key);
-    } catch (RocksDBException e) {
-      throw toIOException("Failed to delete the given key", e);
-    }
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, false, filters);
-  }
-
-  @Override
-  public List<Map.Entry<byte[], byte[]>> getSequentialRangeKVs(byte[] startKey,
-      int count, MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    return getRangeKVs(startKey, count, true, filters);
-  }
-
-  private List<Map.Entry<byte[], byte[]>> getRangeKVs(byte[] startKey,
-      int count, boolean sequential,
-      MetadataKeyFilters.MetadataKeyFilter... filters)
-      throws IOException, IllegalArgumentException {
-    List<Map.Entry<byte[], byte[]>> result = new ArrayList<>();
-    long start = System.currentTimeMillis();
-    if (count < 0) {
-      throw new IllegalArgumentException(
-          "Invalid count given " + count + ", count must be greater than 0");
-    }
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      if (startKey == null) {
-        it.seekToFirst();
-      } else {
-        if(get(startKey) == null) {
-          // Key not found, return empty list
-          return result;
-        }
-        it.seek(startKey);
-      }
-      while(it.isValid() && result.size() < count) {
-        byte[] currentKey = it.key();
-        byte[] currentValue = it.value();
-
-        it.prev();
-        final byte[] prevKey = it.isValid() ? it.key() : null;
-
-        it.seek(currentKey);
-        it.next();
-        final byte[] nextKey = it.isValid() ? it.key() : null;
-
-        if (filters == null) {
-          result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey,
-              currentValue));
-        } else {
-          if (Arrays.asList(filters).stream()
-              .allMatch(entry -> entry.filterKey(prevKey,
-                  currentKey, nextKey))) {
-            result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey,
-                currentValue));
-          } else {
-            if (result.size() > 0 && sequential) {
-              // if the caller asks for a sequential range of results,
-              // and we met a dis-match, abort iteration from here.
-              // if result is empty, we continue to look for the first match.
-              break;
-            }
-          }
-        }
-      }
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-      long end = System.currentTimeMillis();
-      long timeConsumed = end - start;
-      if (LOG.isDebugEnabled()) {
-        if (filters != null) {
-          for (MetadataKeyFilters.MetadataKeyFilter filter : filters) {
-            int scanned = filter.getKeysScannedNum();
-            int hinted = filter.getKeysHintedNum();
-            if (scanned > 0 || hinted > 0) {
-              LOG.debug(
-                  "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}",
-                  filter.getClass().getSimpleName(), 
filter.getKeysScannedNum(),
-                  filter.getKeysHintedNum());
-            }
-          }
-        }
-        LOG.debug("Time consumed for getRangeKVs() is {}ms,"
-            + " result length is {}.", timeConsumed, result.size());
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public void writeBatch(BatchOperation operation)
-      throws IOException {
-    List<BatchOperation.SingleOperation> operations =
-        operation.getOperations();
-    if (!operations.isEmpty()) {
-      try (WriteBatch writeBatch = new WriteBatch()) {
-        for (BatchOperation.SingleOperation opt : operations) {
-          switch (opt.getOpt()) {
-          case DELETE:
-            writeBatch.remove(opt.getKey());
-            break;
-          case PUT:
-            writeBatch.put(opt.getKey(), opt.getValue());
-            break;
-          default:
-            throw new IllegalArgumentException("Invalid operation "
-                + opt.getOpt());
-          }
-        }
-        db.write(writeOptions, writeBatch);
-      } catch (RocksDBException e) {
-        throw toIOException("Batch write operation failed", e);
-      }
-    }
-  }
-
-  @Override
-  public void compactDB() throws IOException {
-    if (db != null) {
-      try {
-        db.compactRange();
-      } catch (RocksDBException e) {
-        throw toIOException("Failed to compact db", e);
-      }
-    }
-  }
-
-  private void deleteQuietly(File fileOrDir) {
-    if (fileOrDir != null && fileOrDir.exists()) {
-      try {
-        FileUtils.forceDelete(fileOrDir);
-      } catch (IOException e) {
-        LOG.warn("Failed to delete dir {}", fileOrDir.getAbsolutePath(), e);
-      }
-    }
-  }
-
-  @Override
-  public void destroy() throws IOException {
-    // Make sure db is closed.
-    close();
-
-    // There is no destroydb java API available,
-    // equivalently we can delete all db directories.
-    deleteQuietly(dbLocation);
-    deleteQuietly(new File(dbOptions.dbLogDir()));
-    deleteQuietly(new File(dbOptions.walDir()));
-    List<DbPath> dbPaths = dbOptions.dbPaths();
-    if (dbPaths != null) {
-      dbPaths.forEach(dbPath -> {
-        deleteQuietly(new File(dbPath.toString()));
-      });
-    }
-  }
-
-  @Override
-  public ImmutablePair<byte[], byte[]> peekAround(int offset,
-      byte[] from) throws IOException, IllegalArgumentException {
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      if (from == null) {
-        it.seekToFirst();
-      } else {
-        it.seek(from);
-      }
-      if (!it.isValid()) {
-        return null;
-      }
-
-      switch (offset) {
-      case 0:
-        break;
-      case 1:
-        it.next();
-        break;
-      case -1:
-        it.prev();
-        break;
-      default:
-        throw new IllegalArgumentException(
-            "Position can only be -1, 0 " + "or 1, but found " + offset);
-      }
-      return it.isValid() ? new ImmutablePair<>(it.key(), it.value()) : null;
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-    }
-  }
-
-  @Override
-  public void iterate(byte[] from, EntryConsumer consumer)
-      throws IOException {
-    RocksIterator it = null;
-    try {
-      it = db.newIterator();
-      if (from != null) {
-        it.seek(from);
-      } else {
-        it.seekToFirst();
-      }
-      while (it.isValid()) {
-        if (!consumer.consume(it.key(), it.value())) {
-          break;
-        }
-        it.next();
-      }
-    } finally {
-      if (it != null) {
-        it.close();
-      }
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (statMBeanName != null) {
-      MBeans.unregister(statMBeanName);
-    }
-    if (db != null) {
-      db.close();
-    }
-
-  }
-
-  @VisibleForTesting
-  protected ObjectName getStatMBeanName() {
-    return statMBeanName;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java
deleted file mode 100644
index df0f41c..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.utils;
-
-import org.rocksdb.HistogramData;
-import org.rocksdb.HistogramType;
-import org.rocksdb.Statistics;
-import org.rocksdb.TickerType;
-
-import javax.management.Attribute;
-import javax.management.AttributeList;
-import javax.management.AttributeNotFoundException;
-import javax.management.DynamicMBean;
-import javax.management.InvalidAttributeValueException;
-import javax.management.MBeanAttributeInfo;
-import javax.management.MBeanException;
-import javax.management.MBeanInfo;
-import javax.management.ReflectionException;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.stream.Stream;
-
-/**
- * Adapter JMX bean to publish all the Rocksdb metrics.
- */
-public class RocksDBStoreMBean implements DynamicMBean {
-
-  private Statistics statistics;
-
-  private Set<String> histogramAttributes = new HashSet<>();
-
-  public RocksDBStoreMBean(Statistics statistics) {
-    this.statistics = statistics;
-    histogramAttributes.add("Average");
-    histogramAttributes.add("Median");
-    histogramAttributes.add("Percentile95");
-    histogramAttributes.add("Percentile99");
-    histogramAttributes.add("StandardDeviation");
-  }
-
-  @Override
-  public Object getAttribute(String attribute)
-      throws AttributeNotFoundException, MBeanException, ReflectionException {
-    for (String histogramAttribute : histogramAttributes) {
-      if (attribute.endsWith("_" + histogramAttribute.toUpperCase())) {
-        String keyName = attribute
-            .substring(0, attribute.length() - histogramAttribute.length() -1);
-        try {
-          HistogramData histogram =
-              statistics.getHistogramData(HistogramType.valueOf(keyName));
-          try {
-            Method method =
-                HistogramData.class.getMethod("get" + histogramAttribute);
-            return method.invoke(histogram);
-          } catch (Exception e) {
-            throw new ReflectionException(e,
-                "Can't read attribute " + attribute);
-          }
-        } catch (IllegalArgumentException exception) {
-          throw new AttributeNotFoundException(
-              "No such attribute in RocksDB stats: " + attribute);
-        }
-      }
-    }
-    try {
-      return statistics.getTickerCount(TickerType.valueOf(attribute));
-    } catch (IllegalArgumentException ex) {
-      throw new AttributeNotFoundException(
-          "No such attribute in RocksDB stats: " + attribute);
-    }
-  }
-
-  @Override
-  public void setAttribute(Attribute attribute)
-      throws AttributeNotFoundException, InvalidAttributeValueException,
-      MBeanException, ReflectionException {
-
-  }
-
-  @Override
-  public AttributeList getAttributes(String[] attributes) {
-    AttributeList result = new AttributeList();
-    for (String attributeName : attributes) {
-      try {
-        Object value = getAttribute(attributeName);
-        result.add(value);
-      } catch (Exception e) {
-        //TODO
-      }
-    }
-    return result;
-  }
-
-  @Override
-  public AttributeList setAttributes(AttributeList attributes) {
-    return null;
-  }
-
-  @Override
-  public Object invoke(String actionName, Object[] params, String[] signature)
-      throws MBeanException, ReflectionException {
-    return null;
-  }
-
-  @Override
-  public MBeanInfo getMBeanInfo() {
-
-    List<MBeanAttributeInfo> attributes = new ArrayList<>();
-    for (TickerType tickerType : TickerType.values()) {
-      attributes.add(new MBeanAttributeInfo(tickerType.name(), "long",
-          "RocksDBStat: " + tickerType.name(), true, false, false));
-    }
-    for (HistogramType histogramType : HistogramType.values()) {
-      for (String histogramAttribute : histogramAttributes) {
-        attributes.add(new MBeanAttributeInfo(
-            histogramType.name() + "_" + histogramAttribute.toUpperCase(),
-            "long", "RocksDBStat: " + histogramType.name(), true, false,
-            false));
-      }
-    }
-
-    return new MBeanInfo("", "RocksDBStat",
-        attributes.toArray(new MBeanAttributeInfo[0]), null, null, null);
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/package-info.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/package-info.java
deleted file mode 100644
index 4466337..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/utils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.utils;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/CBlockClientServerProtocol.proto
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/CBlockClientServerProtocol.proto
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/CBlockClientServerProtocol.proto
deleted file mode 100644
index 5e3e0c7..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/CBlockClientServerProtocol.proto
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *unstable* .proto interface.
- */
-option java_package = "org.apache.hadoop.cblock.protocol.proto";
-option java_outer_classname = "CBlockClientServerProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.cblock;
-
-import "Ozone.proto";
-import "CBlockServiceProtocol.proto";
-/**
-* This message is sent from CBlock client side to CBlock server to
-* mount a volume specified by owner name and volume name.
-*
-* Right now, this is the only communication between client and server.
-* After the volume is mounted, CBlock client will talk to containers
-* by itself, nothing to do with CBlock server.
-*/
-message MountVolumeRequestProto {
-    required string userName = 1;
-    required string volumeName = 2;
-}
-
-/**
-* This message is sent from CBlock server to CBlock client as response
-* of mount a volume. It checks the whether the volume is valid to access
-* at all.(e.g. volume exist)
-*
-* And include enough information (volume size, block size, list of
-* containers for this volume) for client side to perform read/write on
-* the volume.
-*/
-message MountVolumeResponseProto {
-    required bool isValid = 1;
-    optional string userName = 2;
-    optional string volumeName = 3;
-    optional uint64 volumeSize = 4;
-    optional uint32 blockSize = 5;
-    repeated ContainerIDProto allContainerIDs = 6;
-}
-
-/**
-* This message include ID of container which can be used to locate the
-* container. Since the order of containers needs to be maintained, also
-* includes a index field to verify the correctness of the order.
-*/
-message ContainerIDProto {
-    required string containerID = 1;
-    required uint64 index = 2;
-    // making pipeline optional to be compatible with exisiting tests
-    optional hadoop.hdfs.ozone.Pipeline pipeline = 3;
-}
-
-
-message ListVolumesRequestProto {
-
-}
-
-message ListVolumesResponseProto {
-    repeated VolumeInfoProto volumeEntry = 1;
-}
-
-
-service CBlockClientServerProtocolService {
-    /**
-    * mount the volume.
-    */
-    rpc mountVolume(MountVolumeRequestProto) returns 
(MountVolumeResponseProto);
-
-    rpc listVolumes(ListVolumesRequestProto) returns(ListVolumesResponseProto);
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/CBlockServiceProtocol.proto
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/CBlockServiceProtocol.proto 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/CBlockServiceProtocol.proto
deleted file mode 100644
index 36e4b59..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/CBlockServiceProtocol.proto
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.cblock.protocol.proto";
-option java_outer_classname = "CBlockServiceProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.cblock;
-
-/**
-* This message is sent to CBlock server to create a volume. Creating
-* volume requries four parameters: owner of the volume, name of the volume
-* size of volume and block size of the volume.
-*/
-message CreateVolumeRequestProto {
-    required string userName = 1;
-    required string volumeName = 2;
-    required uint64 volumeSize = 3;
-    optional uint32 blockSize = 4 [default = 4096];
-}
-
-/**
-* Empty response message.
-*/
-message CreateVolumeResponseProto {
-
-}
-
-/**
-* This message is sent to CBlock server to delete a volume. The volume
-* is specified by owner name and volume name. If force is set to
-* false, volume will be deleted only if it is empty. Otherwise delete it
-* regardless.
-*/
-message DeleteVolumeRequestProto {
-    required string userName = 1;
-    required string volumeName = 2;
-    optional bool force = 3;
-}
-
-/**
-* Empty response message.
-*/
-message DeleteVolumeResponseProto {
-
-}
-
-/**
-* This message is sent to CBlock server to request info of a volume. The
-* volume is specified by owner name and volume name.
-*/
-message InfoVolumeRequestProto {
-    required string userName = 1;
-    required string volumeName = 2;
-}
-
-/**
-* This message describes the information of a volume.
-* Currently, the info includes the volume creation parameters and a number
-* as the usage of the volume, in terms of number of bytes.
-*/
-message VolumeInfoProto {
-    required string userName = 1;
-    required string volumeName = 2;
-    required uint64 volumeSize = 3;
-    required uint64 blockSize = 4;
-    optional uint64 usage = 5;
-    // TODO : potentially volume ACL
-}
-
-/**
-* This message is sent from CBlock server as response of info volume request.
-*/
-message InfoVolumeResponseProto {
-    optional VolumeInfoProto volumeInfo = 1;
-}
-
-/**
-* This message is sent to CBlock server to list all available volume.
-*/
-message ListVolumeRequestProto {
-    optional string userName = 1;
-}
-
-/**
-* This message is sent from CBlock server as response of volume listing.
-*/
-message ListVolumeResponseProto {
-    repeated VolumeInfoProto volumeEntry = 1;
-}
-
-service CBlockServiceProtocolService {
-    /**
-    * Create a volume.
-    */
-    rpc createVolume(CreateVolumeRequestProto) 
returns(CreateVolumeResponseProto);
-
-    /**
-    * Delete a volume.
-    */
-    rpc deleteVolume(DeleteVolumeRequestProto) 
returns(DeleteVolumeResponseProto);
-
-    /**
-    * Get info of a volume.
-    */
-    rpc infoVolume(InfoVolumeRequestProto) returns(InfoVolumeResponseProto);
-
-    /**
-    * List all available volumes.
-    */
-    rpc listVolume(ListVolumeRequestProto) returns(ListVolumeResponseProto);
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to