HDFS-10914. Move remnants of oah.hdfs.client to hadoop-hdfs-client.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92e5e915
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92e5e915
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92e5e915

Branch: refs/heads/HDFS-10467
Commit: 92e5e9159850c01635091ea6ded0d8ee76691a9a
Parents: 5f34402
Author: Andrew Wang <w...@apache.org>
Authored: Wed Sep 28 16:00:51 2016 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Wed Sep 28 16:01:03 2016 -0700

----------------------------------------------------------------------
 .../hdfs/client/CreateEncryptionZoneFlag.java   |  70 +++
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    | 523 ++++++++++++++++++
 .../apache/hadoop/hdfs/client/HdfsUtils.java    |  86 +++
 .../apache/hadoop/hdfs/client/package-info.java |  27 +
 .../hdfs/client/CreateEncryptionZoneFlag.java   |  71 ---
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    | 524 -------------------
 .../apache/hadoop/hdfs/client/HdfsUtils.java    |  86 ---
 .../apache/hadoop/hdfs/client/package-info.java |  27 -
 8 files changed, 706 insertions(+), 708 deletions(-)
----------------------------------------------------------------------
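
Since each class keeps its org.apache.hadoop.hdfs.client package and only moves from the
hadoop-hdfs module to hadoop-hdfs-client, downstream imports are unchanged; code that uses
only these classes should now resolve them from the hadoop-hdfs-client artifact alone. A
compile-only sketch (the class name below is illustrative, not part of this patch):

    import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.client.HdfsUtils;

    /** Referencing the moved types checks that they resolve from hadoop-hdfs-client. */
    public class ClientPackageSmokeTest {
      static final Class<?>[] MOVED_TYPES = {
          CreateEncryptionZoneFlag.class, HdfsAdmin.class, HdfsUtils.class
      };
    }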


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
new file mode 100644
index 0000000..ad4cea6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * CreateEncryptionZoneFlag is used in
+ * {@link HdfsAdmin#createEncryptionZone(Path, String, EnumSet)} to indicate
+ * what should be done when creating an encryption zone.
+ *
+ * Use CreateEncryptionZoneFlag as follows:
+ * <ol>
+ *   <li>PROVISION_TRASH - provision a trash directory for the encryption zone
+ *   to support soft delete.</li>
+ * </ol>
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum CreateEncryptionZoneFlag {
+
+  /**
+   * Do not provision a trash directory in the encryption zone.
+   *
+   * @see CreateEncryptionZoneFlag#NO_TRASH
+   */
+  NO_TRASH((short) 0x00),
+  /**
+   * Provision a trash directory .Trash/ in the
+   * encryption zone.
+   *
+   * @see CreateEncryptionZoneFlag#PROVISION_TRASH
+   */
+  PROVISION_TRASH((short) 0x01);
+
+  private final short mode;
+
+  CreateEncryptionZoneFlag(short mode) {
+    this.mode = mode;
+  }
+
+  public static CreateEncryptionZoneFlag valueOf(short mode) {
+    for (CreateEncryptionZoneFlag flag : CreateEncryptionZoneFlag.values()) {
+      if (flag.getMode() == mode) {
+        return flag;
+      }
+    }
+    return null;
+  }
+
+  public short getMode() {
+    return mode;
+  }
+}
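
A minimal usage sketch for the enum above: building the EnumSet passed to
HdfsAdmin#createEncryptionZone and round-tripping a flag through its short mode value.
The class name is hypothetical; the constants are the two defined in this file.

    import java.util.EnumSet;
    import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;

    public class CreateEncryptionZoneFlagExample {
      public static void main(String[] args) {
        // Ask createEncryptionZone to also provision a .Trash/ directory.
        EnumSet<CreateEncryptionZoneFlag> withTrash =
            EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH);

        // Recover a flag from its short mode value via valueOf(short).
        short mode = CreateEncryptionZoneFlag.PROVISION_TRASH.getMode();  // 0x01
        System.out.println(CreateEncryptionZoneFlag.valueOf(mode));       // PROVISION_TRASH
        System.out.println(withTrash);
      }
    }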

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
new file mode 100644
index 0000000..946b79d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -0,0 +1,523 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collection;
+import java.util.EnumSet;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+
+/**
+ * The public API for performing administrative functions on HDFS. Those writing
+ * applications against HDFS should prefer this interface to directly accessing
+ * functionality in DistributedFileSystem or DFSClient.
+ *
+ * Note that this is distinct from the similarly-named DFSAdmin, which
+ * is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
+ * commands.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HdfsAdmin {
+
+  private DistributedFileSystem dfs;
+  private static final FsPermission TRASH_PERMISSION = new FsPermission(
+      FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
+
+  /**
+   * Create a new HdfsAdmin client.
+   *
+   * @param uri the unique URI of the HDFS file system to administer
+   * @param conf configuration
+   * @throws IOException in the event the file system could not be created
+   */
+  public HdfsAdmin(URI uri, Configuration conf) throws IOException {
+    FileSystem fs = FileSystem.get(uri, conf);
+    if (!(fs instanceof DistributedFileSystem)) {
+      throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
+    } else {
+      dfs = (DistributedFileSystem)fs;
+    }
+  }
+
+  /**
+   * Set the namespace quota (count of files, directories, and sym links) for a
+   * directory.
+   *
+   * @param src the path to set the quota for
+   * @param quota the value to set for the quota
+   * @throws IOException in the event of error
+   */
+  public void setQuota(Path src, long quota) throws IOException {
+    dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
+  }
+
+  /**
+   * Clear the namespace quota (count of files, directories and sym links) for a
+   * directory.
+   *
+   * @param src the path to clear the quota of
+   * @throws IOException in the event of error
+   */
+  public void clearQuota(Path src) throws IOException {
+    dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
+  }
+
+  /**
+   * Set the storage space quota (size of files) for a directory. Note that
+   * directories and sym links do not occupy storage space.
+   *
+   * @param src the path to set the space quota of
+   * @param spaceQuota the value to set for the space quota
+   * @throws IOException in the event of error
+   */
+  public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
+    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
+  }
+
+  /**
+   * Clear the storage space quota (size of files) for a directory. Note that
+   * directories and sym links do not occupy storage space.
+   *
+   * @param src the path to clear the space quota of
+   * @throws IOException in the event of error
+   */
+  public void clearSpaceQuota(Path src) throws IOException {
+    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
+  }
+
+  /**
+   * Set the quota by storage type for a directory. Note that
+   * directories and sym links do not occupy storage type quota.
+   *
+   * @param src the target directory to set the quota by storage type
+   * @param type the storage type to set for quota by storage type
+   * @param quota the value to set for quota by storage type
+   * @throws IOException in the event of error
+   */
+  public void setQuotaByStorageType(Path src, StorageType type, long quota)
+      throws IOException {
+    dfs.setQuotaByStorageType(src, type, quota);
+  }
+
+  /**
+   * Clear the space quota by storage type for a directory. Note that
+   * directories and sym links do not occupy storage type quota.
+   *
+   * @param src the target directory to clear the quota by storage type
+   * @param type the storage type to clear for quota by storage type
+   * @throws IOException in the event of error
+   */
+  public void clearQuotaByStorageType(Path src, StorageType type) throws IOException {
+    dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
+  }
+
+  /**
+   * Allow snapshot on a directory.
+   * @param path The path of the directory where snapshots will be taken.
+   */
+  public void allowSnapshot(Path path) throws IOException {
+    dfs.allowSnapshot(path);
+  }
+
+  /**
+   * Disallow snapshot on a directory.
+   * @param path The path of the snapshottable directory.
+   */
+  public void disallowSnapshot(Path path) throws IOException {
+    dfs.disallowSnapshot(path);
+  }
+
+  /**
+   * Add a new CacheDirectiveInfo.
+   *
+   * @param info Information about a directive to add.
+   * @param flags {@link CacheFlag}s to use for this operation.
+   * @return the ID of the directive that was created.
+   * @throws IOException if the directive could not be added
+   */
+  public long addCacheDirective(CacheDirectiveInfo info,
+      EnumSet<CacheFlag> flags) throws IOException {
+  return dfs.addCacheDirective(info, flags);
+  }
+
+  /**
+   * Modify a CacheDirective.
+   *
+   * @param info Information about the directive to modify. You must set the ID
+   *          to indicate which CacheDirective you want to modify.
+   * @param flags {@link CacheFlag}s to use for this operation.
+   * @throws IOException if the directive could not be modified
+   */
+  public void modifyCacheDirective(CacheDirectiveInfo info,
+      EnumSet<CacheFlag> flags) throws IOException {
+    dfs.modifyCacheDirective(info, flags);
+  }
+
+  /**
+   * Remove a CacheDirective.
+   *
+   * @param id identifier of the CacheDirectiveInfo to remove
+   * @throws IOException if the directive could not be removed
+   */
+  public void removeCacheDirective(long id)
+      throws IOException {
+    dfs.removeCacheDirective(id);
+  }
+
+  /**
+   * List cache directives. Incrementally fetches results from the server.
+   *
+   * @param filter Filter parameters to use when listing the directives, null to
+   *               list all directives visible to us.
+   * @return A RemoteIterator which returns CacheDirectiveInfo objects.
+   */
+  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
+      CacheDirectiveInfo filter) throws IOException {
+    return dfs.listCacheDirectives(filter);
+  }
+
+  /**
+   * Add a cache pool.
+   *
+   * @param info
+   *          The request to add a cache pool.
+   * @throws IOException
+   *          If the request could not be completed.
+   */
+  public void addCachePool(CachePoolInfo info) throws IOException {
+    dfs.addCachePool(info);
+  }
+
+  /**
+   * Modify an existing cache pool.
+   *
+   * @param info
+   *          The request to modify a cache pool.
+   * @throws IOException
+   *          If the request could not be completed.
+   */
+  public void modifyCachePool(CachePoolInfo info) throws IOException {
+    dfs.modifyCachePool(info);
+  }
+
+  /**
+   * Remove a cache pool.
+   *
+   * @param poolName
+   *          Name of the cache pool to remove.
+   * @throws IOException
+   *          if the cache pool did not exist, or could not be removed.
+   */
+  public void removeCachePool(String poolName) throws IOException {
+    dfs.removeCachePool(poolName);
+  }
+
+  /**
+   * List all cache pools.
+   *
+   * @return A remote iterator from which you can get CachePoolEntry objects.
+   *          Requests will be made as needed.
+   * @throws IOException
+   *          If there was an error listing cache pools.
+   */
+  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
+    return dfs.listCachePools();
+  }
+
+  /**
+   * Create an encryption zone rooted at an empty existing directory, using the
+   * specified encryption key. An encryption zone has an associated encryption
+   * key used when reading and writing files within the zone.
+   *
+   * @param path    The path of the root of the encryption zone. Must refer to
+   *                an empty, existing directory.
+   * @param keyName Name of key available at the KeyProvider.
+   * @throws IOException            if there was a general IO exception
+   * @throws AccessControlException if the caller does not have access to path
+   * @throws FileNotFoundException  if the path does not exist
+   */
+  @Deprecated
+  public void createEncryptionZone(Path path, String keyName)
+      throws IOException, AccessControlException, FileNotFoundException {
+    dfs.createEncryptionZone(path, keyName);
+  }
+
+  /**
+   * Create an encryption zone rooted at an empty existing directory, using the
+   * specified encryption key. An encryption zone has an associated encryption
+   * key used when reading and writing files within the zone.
+   *
+   * Additional options, such as provisioning the trash directory, can be
+   * specified using {@link CreateEncryptionZoneFlag} flags.
+   *
+   * @param path    The path of the root of the encryption zone. Must refer to
+   *                an empty, existing directory.
+   * @param keyName Name of key available at the KeyProvider.
+   * @param flags   flags for this operation.
+   * @throws IOException            if there was a general IO exception
+   * @throws AccessControlException if the caller does not have access to path
+   * @throws FileNotFoundException  if the path does not exist
+   * @throws HadoopIllegalArgumentException if the flags are invalid
+   */
+  public void createEncryptionZone(Path path, String keyName,
+      EnumSet<CreateEncryptionZoneFlag> flags)
+      throws IOException, AccessControlException, FileNotFoundException,
+      HadoopIllegalArgumentException{
+    dfs.createEncryptionZone(path, keyName);
+    if (flags.contains(CreateEncryptionZoneFlag.PROVISION_TRASH)) {
+      if (flags.contains(CreateEncryptionZoneFlag.NO_TRASH)) {
+        throw new HadoopIllegalArgumentException(
+            "can not have both PROVISION_TRASH and NO_TRASH flags");
+      }
+      this.provisionEZTrash(path);
+    }
+  }
+
+  /**
+   * Provision a trash directory for a given encryption zone.
+
+   * @param path the root of the encryption zone
+   * @throws IOException if the trash directory can not be created.
+   */
+  public void provisionEncryptionZoneTrash(Path path) throws IOException {
+    this.provisionEZTrash(path);
+  }
+
+  /**
+   * Get the path of the encryption zone for a given file or directory.
+   *
+   * @param path The path to get the ez for.
+   *
+   * @return The EncryptionZone of the ez, or null if path is not in an ez.
+   * @throws IOException            if there was a general IO exception
+   * @throws AccessControlException if the caller does not have access to path
+   * @throws FileNotFoundException  if the path does not exist
+   */
+  public EncryptionZone getEncryptionZoneForPath(Path path)
+    throws IOException, AccessControlException, FileNotFoundException {
+    return dfs.getEZForPath(path);
+  }
+
+  /**
+   * Returns a RemoteIterator which can be used to list the encryption zones
+   * in HDFS. For large numbers of encryption zones, the iterator will fetch
+   * the list of zones in a number of small batches.
+   * <p/>
+   * Since the list is fetched in batches, it does not represent a
+   * consistent snapshot of the entire list of encryption zones.
+   * <p/>
+   * This method can only be called by HDFS superusers.
+   */
+  public RemoteIterator<EncryptionZone> listEncryptionZones()
+      throws IOException {
+    return dfs.listEncryptionZones();
+  }
+
+  /**
+   * Exposes a stream of namesystem events. Only events occurring after the
+   * stream is created are available.
+   * See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
+   * for information on stream usage.
+   * See {@link org.apache.hadoop.hdfs.inotify.Event}
+   * for information on the available events.
+   * <p/>
+   * Inotify users may want to tune the following HDFS parameters to
+   * ensure that enough extra HDFS edits are saved to support inotify clients
+   * that fall behind the current state of the namespace while reading events.
+   * The default parameter values should generally be reasonable. If edits are
+   * deleted before their corresponding events can be read, clients will see a
+   * {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
+   * {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
+   *
+   * It should generally be sufficient to tune these parameters:
+   * dfs.namenode.num.extra.edits.retained
+   * dfs.namenode.max.extra.edits.segments.retained
+   *
+   * Parameters that affect the number of created segments and the number of
+   * edits that are considered necessary, i.e. do not count towards the
+   * dfs.namenode.num.extra.edits.retained quota):
+   * dfs.namenode.checkpoint.period
+   * dfs.namenode.checkpoint.txns
+   * dfs.namenode.num.checkpoints.retained
+   * dfs.ha.log-roll.period
+   * <p/>
+   * It is recommended that local journaling be configured
+   * (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
+   * so that edit transfers from the shared journal can be avoided.
+   *
+   * @throws IOException If there was an error obtaining the stream.
+   */
+  public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
+    return dfs.getInotifyEventStream();
+  }
+
+  /**
+   * A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
+   * users who are aware of HDFS edits up to lastReadTxid (e.g. because they
+   * have access to an FSImage inclusive of lastReadTxid) and only want to read
+   * events after this point.
+   */
+  public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
+      throws IOException {
+    return dfs.getInotifyEventStream(lastReadTxid);
+  }
+
+  /**
+   * Set the source path to the specified storage policy.
+   *
+   * @param src The source path referring to either a directory or a file.
+   * @param policyName The name of the storage policy.
+   */
+  public void setStoragePolicy(final Path src, final String policyName)
+      throws IOException {
+    dfs.setStoragePolicy(src, policyName);
+  }
+
+  /**
+   * Unset the storage policy set for a given file or directory.
+   *
+   * @param src file or directory path.
+   * @throws IOException
+   */
+  public void unsetStoragePolicy(final Path src) throws IOException {
+    dfs.unsetStoragePolicy(src);
+  }
+
+  /**
+   * Query the effective storage policy ID for the given file or directory.
+   *
+   * @param src file or directory path.
+   * @return storage policy for the given file or directory.
+   * @throws IOException
+   */
+  public BlockStoragePolicySpi getStoragePolicy(final Path src)
+      throws IOException {
+    return dfs.getStoragePolicy(src);
+  }
+
+  /**
+   * Retrieve all the storage policies supported by HDFS file system.
+   *
+   * @return all storage policies supported by HDFS file system.
+   * @throws IOException
+   */
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    return dfs.getAllStoragePolicies();
+  }
+
+  /**
+   * Set the source path to the specified erasure coding policy.
+   *
+   * @param path The source path referring to a directory.
+   * @param ecPolicy The erasure coding policy for the directory.
+   *                 If null, the default will be used.
+   * @throws IOException
+   */
+  public void setErasureCodingPolicy(final Path path,
+      final ErasureCodingPolicy ecPolicy) throws IOException {
+    dfs.setErasureCodingPolicy(path, ecPolicy);
+  }
+
+  /**
+   * Get the erasure coding policy information for the specified path
+   *
+   * @param path
+   * @return Returns the policy information if file or directory on the path is
+   *          erasure coded. Null otherwise.
+   * @throws IOException
+   */
+  public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
+      throws IOException {
+    return dfs.getErasureCodingPolicy(path);
+  }
+
+  /**
+   * Get the Erasure coding policies supported.
+   *
+   * @throws IOException
+   */
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
+    return dfs.getClient().getErasureCodingPolicies();
+  }
+
+  private void provisionEZTrash(Path path) throws IOException {
+    // make sure the path is an EZ
+    EncryptionZone ez = dfs.getEZForPath(path);
+    if (ez == null) {
+      throw new IllegalArgumentException(path + " is not an encryption zone.");
+    }
+
+    String ezPath = ez.getPath();
+    if (!path.toString().equals(ezPath)) {
+      throw new IllegalArgumentException(path + " is not the root of an " +
+          "encryption zone. Do you mean " + ez.getPath() + "?");
+    }
+
+    // check if the trash directory exists
+
+    Path trashPath = new Path(ez.getPath(), FileSystem.TRASH_PREFIX);
+
+    if (dfs.exists(trashPath)) {
+      String errMessage = "Will not provision new trash directory for " +
+          "encryption zone " + ez.getPath() + ". Path already exists.";
+      FileStatus trashFileStatus = dfs.getFileStatus(trashPath);
+      if (!trashFileStatus.isDirectory()) {
+        errMessage += "\r\n" +
+            "Warning: " + trashPath.toString() + " is not a directory";
+      }
+      if (!trashFileStatus.getPermission().equals(TRASH_PERMISSION)) {
+        errMessage += "\r\n" +
+            "Warning: the permission of " +
+            trashPath.toString() + " is not " + TRASH_PERMISSION;
+      }
+      throw new IOException(errMessage);
+    }
+
+    // Update the permission bits
+    dfs.mkdir(trashPath, TRASH_PERMISSION);
+    dfs.setPermission(trashPath, TRASH_PERMISSION);
+  }
+
+}
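
For reference, a hedged usage sketch of the relocated HdfsAdmin API. The namenode URI,
paths, quota values, and key name are placeholders for illustration, not anything defined
by this patch; the calls themselves are the methods shown above.

    import java.net.URI;
    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class HdfsAdminExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        HdfsAdmin admin =
            new HdfsAdmin(URI.create("hdfs://nn.example.com:8020"), conf);

        // Cap the namespace and storage space quotas on a project directory.
        Path project = new Path("/projects/alpha");
        admin.setQuota(project, 1000000L);
        admin.setSpaceQuota(project, 10L * 1024 * 1024 * 1024);  // 10 GB

        // Create an encryption zone rooted at an empty directory and provision
        // its .Trash/ directory in the same call.
        Path zone = new Path("/secure/alpha");
        admin.createEncryptionZone(zone, "alpha-key",
            EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
      }
    }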

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
new file mode 100644
index 0000000..3b77a3f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The public utility API for HDFS.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HdfsUtils {
+  public static final Logger LOG = LoggerFactory.getLogger(HdfsUtils.class);
+
+  /**
+   * Is the HDFS healthy?
+   * HDFS is considered as healthy if it is up and not in safemode.
+   *
+   * @param uri the HDFS URI.  Note that the URI path is ignored.
+   * @return true if HDFS is healthy; false, otherwise.
+   */
+  public static boolean isHealthy(URI uri) {
+    //check scheme
+    final String scheme = uri.getScheme();
+    if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(scheme)) {
+      throw new IllegalArgumentException("The scheme is not "
+          + HdfsConstants.HDFS_URI_SCHEME + ", uri=" + uri);
+    }
+
+    final Configuration conf = new Configuration();
+    //disable FileSystem cache
+    conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
+    //disable client retry for rpc connection and rpc calls
+    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, false);
+    conf.setInt(
+        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
+
+    DistributedFileSystem fs = null;
+    try {
+      fs = (DistributedFileSystem)FileSystem.get(uri, conf);
+      final boolean safemode = fs.setSafeMode(SafeModeAction.SAFEMODE_GET);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Is namenode in safemode? " + safemode + "; uri=" + uri);
+      }
+
+      fs.close();
+      fs = null;
+      return !safemode;
+    } catch(IOException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Got an exception for uri=" + uri, e);
+      }
+      return false;
+    } finally {
+      IOUtils.closeQuietly(fs);
+    }
+  }
+}
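
A small sketch of the health probe above; the namenode URI is an assumption for
illustration. isHealthy() swallows IOExceptions internally and reports them as false.

    import java.net.URI;
    import org.apache.hadoop.hdfs.client.HdfsUtils;

    public class HdfsHealthCheckExample {
      public static void main(String[] args) {
        // True only if the namenode answers and is not in safemode.
        boolean healthy =
            HdfsUtils.isHealthy(URI.create("hdfs://nn.example.com:8020"));
        System.out.println("HDFS healthy? " + healthy);
      }
    }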

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
new file mode 100644
index 0000000..95eceb7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package provides the administrative APIs for HDFS.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+package org.apache.hadoop.hdfs.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
deleted file mode 100644
index ccf9193..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.client;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * CreateEncryptionZoneFlag is used in
- * {@link HdfsAdmin#createEncryptionZone(Path, String, EnumSet)} to indicate
- * what should be done when creating an encryption zone.
- *
- * Use CreateEncryptionZoneFlag as follows:
- * <ol>
- *   <li>PROVISION_TRASH - provision a trash directory for the encryption zone
- *   to support soft delete.</li>
- * </ol>
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public enum CreateEncryptionZoneFlag {
-
-  /**
-   * Do not provision a trash directory in the encryption zone.
-   *
-   * @see CreateEncryptionZoneFlag#NO_TRASH
-   */
-  NO_TRASH((short) 0x00),
-  /**
-   * Provision a trash directory .Trash/ in the
-   * encryption zone.
-   *
-   * @see CreateEncryptionZoneFlag#PROVISION_TRASH
-   */
-  PROVISION_TRASH((short) 0x01);
-
-  private final short mode;
-
-  CreateEncryptionZoneFlag(short mode) {
-    this.mode = mode;
-  }
-
-  public static CreateEncryptionZoneFlag valueOf(short mode) {
-    for (CreateEncryptionZoneFlag flag : CreateEncryptionZoneFlag.values()) {
-      if (flag.getMode() == mode) {
-        return flag;
-      }
-    }
-    return null;
-  }
-
-  public short getMode() {
-    return mode;
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
deleted file mode 100644
index b9cf5fb..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ /dev/null
@@ -1,524 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.client;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collection;
-import java.util.EnumSet;
-
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockStoragePolicySpi;
-import org.apache.hadoop.fs.CacheFlag;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
-import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-
-/**
- * The public API for performing administrative functions on HDFS. Those writing
- * applications against HDFS should prefer this interface to directly accessing
- * functionality in DistributedFileSystem or DFSClient.
- * 
- * Note that this is distinct from the similarly-named {@link DFSAdmin}, which
- * is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
- * commands.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class HdfsAdmin {
-  
-  private DistributedFileSystem dfs;
-  private static final FsPermission TRASH_PERMISSION = new FsPermission(
-      FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
-  
-  /**
-   * Create a new HdfsAdmin client.
-   * 
-   * @param uri the unique URI of the HDFS file system to administer
-   * @param conf configuration
-   * @throws IOException in the event the file system could not be created
-   */
-  public HdfsAdmin(URI uri, Configuration conf) throws IOException {
-    FileSystem fs = FileSystem.get(uri, conf);
-    if (!(fs instanceof DistributedFileSystem)) {
-      throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
-    } else {
-      dfs = (DistributedFileSystem)fs;
-    }
-  }
-  
-  /**
-   * Set the namespace quota (count of files, directories, and sym links) for a
-   * directory.
-   * 
-   * @param src the path to set the quota for
-   * @param quota the value to set for the quota
-   * @throws IOException in the event of error
-   */
-  public void setQuota(Path src, long quota) throws IOException {
-    dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
-  }
-  
-  /**
-   * Clear the namespace quota (count of files, directories and sym links) for a
-   * directory.
-   * 
-   * @param src the path to clear the quota of
-   * @throws IOException in the event of error
-   */
-  public void clearQuota(Path src) throws IOException {
-    dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
-  }
-  
-  /**
-   * Set the storage space quota (size of files) for a directory. Note that
-   * directories and sym links do not occupy storage space.
-   * 
-   * @param src the path to set the space quota of
-   * @param spaceQuota the value to set for the space quota
-   * @throws IOException in the event of error
-   */
-  public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
-    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
-  }
-  
-  /**
-   * Clear the storage space quota (size of files) for a directory. Note that
-   * directories and sym links do not occupy storage space.
-   * 
-   * @param src the path to clear the space quota of
-   * @throws IOException in the event of error
-   */
-  public void clearSpaceQuota(Path src) throws IOException {
-    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
-  }
-
-  /**
-   * Set the quota by storage type for a directory. Note that
-   * directories and sym links do not occupy storage type quota.
-   *
-   * @param src the target directory to set the quota by storage type
-   * @param type the storage type to set for quota by storage type
-   * @param quota the value to set for quota by storage type
-   * @throws IOException in the event of error
-   */
-  public void setQuotaByStorageType(Path src, StorageType type, long quota)
-      throws IOException {
-    dfs.setQuotaByStorageType(src, type, quota);
-  }
-
-  /**
-   * Clear the space quota by storage type for a directory. Note that
-   * directories and sym links do not occupy storage type quota.
-   *
-   * @param src the target directory to clear the quota by storage type
-   * @param type the storage type to clear for quota by storage type
-   * @throws IOException in the event of error
-   */
-  public void clearQuotaByStorageType(Path src, StorageType type) throws IOException {
-    dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
-  }
-  
-  /**
-   * Allow snapshot on a directory.
-   * @param path The path of the directory where snapshots will be taken.
-   */
-  public void allowSnapshot(Path path) throws IOException {
-    dfs.allowSnapshot(path);
-  }
-  
-  /**
-   * Disallow snapshot on a directory.
-   * @param path The path of the snapshottable directory.
-   */
-  public void disallowSnapshot(Path path) throws IOException {
-    dfs.disallowSnapshot(path);
-  }
-
-  /**
-   * Add a new CacheDirectiveInfo.
-   * 
-   * @param info Information about a directive to add.
-   * @param flags {@link CacheFlag}s to use for this operation.
-   * @return the ID of the directive that was created.
-   * @throws IOException if the directive could not be added
-   */
-  public long addCacheDirective(CacheDirectiveInfo info,
-      EnumSet<CacheFlag> flags) throws IOException {
-  return dfs.addCacheDirective(info, flags);
-  }
-  
-  /**
-   * Modify a CacheDirective.
-   * 
-   * @param info Information about the directive to modify. You must set the ID
-   *          to indicate which CacheDirective you want to modify.
-   * @param flags {@link CacheFlag}s to use for this operation.
-   * @throws IOException if the directive could not be modified
-   */
-  public void modifyCacheDirective(CacheDirectiveInfo info,
-      EnumSet<CacheFlag> flags) throws IOException {
-    dfs.modifyCacheDirective(info, flags);
-  }
-
-  /**
-   * Remove a CacheDirective.
-   * 
-   * @param id identifier of the CacheDirectiveInfo to remove
-   * @throws IOException if the directive could not be removed
-   */
-  public void removeCacheDirective(long id)
-      throws IOException {
-    dfs.removeCacheDirective(id);
-  }
-
-  /**
-   * List cache directives. Incrementally fetches results from the server.
-   * 
-   * @param filter Filter parameters to use when listing the directives, null to
-   *               list all directives visible to us.
-   * @return A RemoteIterator which returns CacheDirectiveInfo objects.
-   */
-  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
-      CacheDirectiveInfo filter) throws IOException {
-    return dfs.listCacheDirectives(filter);
-  }
-
-  /**
-   * Add a cache pool.
-   *
-   * @param info
-   *          The request to add a cache pool.
-   * @throws IOException 
-   *          If the request could not be completed.
-   */
-  public void addCachePool(CachePoolInfo info) throws IOException {
-    dfs.addCachePool(info);
-  }
-
-  /**
-   * Modify an existing cache pool.
-   *
-   * @param info
-   *          The request to modify a cache pool.
-   * @throws IOException 
-   *          If the request could not be completed.
-   */
-  public void modifyCachePool(CachePoolInfo info) throws IOException {
-    dfs.modifyCachePool(info);
-  }
-    
-  /**
-   * Remove a cache pool.
-   *
-   * @param poolName
-   *          Name of the cache pool to remove.
-   * @throws IOException 
-   *          if the cache pool did not exist, or could not be removed.
-   */
-  public void removeCachePool(String poolName) throws IOException {
-    dfs.removeCachePool(poolName);
-  }
-
-  /**
-   * List all cache pools.
-   *
-   * @return A remote iterator from which you can get CachePoolEntry objects.
-   *          Requests will be made as needed.
-   * @throws IOException
-   *          If there was an error listing cache pools.
-   */
-  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
-    return dfs.listCachePools();
-  }
-
-  /**
-   * Create an encryption zone rooted at an empty existing directory, using the
-   * specified encryption key. An encryption zone has an associated encryption
-   * key used when reading and writing files within the zone.
-   *
-   * @param path    The path of the root of the encryption zone. Must refer to
-   *                an empty, existing directory.
-   * @param keyName Name of key available at the KeyProvider.
-   * @throws IOException            if there was a general IO exception
-   * @throws AccessControlException if the caller does not have access to path
-   * @throws FileNotFoundException  if the path does not exist
-   */
-  @Deprecated
-  public void createEncryptionZone(Path path, String keyName)
-      throws IOException, AccessControlException, FileNotFoundException {
-    dfs.createEncryptionZone(path, keyName);
-  }
-
-  /**
-   * Create an encryption zone rooted at an empty existing directory, using the
-   * specified encryption key. An encryption zone has an associated encryption
-   * key used when reading and writing files within the zone.
-   *
-   * Additional options, such as provisioning the trash directory, can be
-   * specified using {@link CreateEncryptionZoneFlag} flags.
-   *
-   * @param path    The path of the root of the encryption zone. Must refer to
-   *                an empty, existing directory.
-   * @param keyName Name of key available at the KeyProvider.
-   * @param flags   flags for this operation.
-   * @throws IOException            if there was a general IO exception
-   * @throws AccessControlException if the caller does not have access to path
-   * @throws FileNotFoundException  if the path does not exist
-   * @throws HadoopIllegalArgumentException if the flags are invalid
-   */
-  public void createEncryptionZone(Path path, String keyName,
-      EnumSet<CreateEncryptionZoneFlag> flags)
-      throws IOException, AccessControlException, FileNotFoundException,
-      HadoopIllegalArgumentException{
-    dfs.createEncryptionZone(path, keyName);
-    if (flags.contains(CreateEncryptionZoneFlag.PROVISION_TRASH)) {
-      if (flags.contains(CreateEncryptionZoneFlag.NO_TRASH)) {
-        throw new HadoopIllegalArgumentException(
-            "can not have both PROVISION_TRASH and NO_TRASH flags");
-      }
-      this.provisionEZTrash(path);
-    }
-  }
-
-  /**
-   * Provision a trash directory for a given encryption zone.
-
-   * @param path the root of the encryption zone
-   * @throws IOException if the trash directory can not be created.
-   */
-  public void provisionEncryptionZoneTrash(Path path) throws IOException {
-    this.provisionEZTrash(path);
-  }
-
-  /**
-   * Get the path of the encryption zone for a given file or directory.
-   *
-   * @param path The path to get the ez for.
-   *
-   * @return The EncryptionZone of the ez, or null if path is not in an ez.
-   * @throws IOException            if there was a general IO exception
-   * @throws AccessControlException if the caller does not have access to path
-   * @throws FileNotFoundException  if the path does not exist
-   */
-  public EncryptionZone getEncryptionZoneForPath(Path path)
-    throws IOException, AccessControlException, FileNotFoundException {
-    return dfs.getEZForPath(path);
-  }
-
-  /**
-   * Returns a RemoteIterator which can be used to list the encryption zones
-   * in HDFS. For large numbers of encryption zones, the iterator will fetch
-   * the list of zones in a number of small batches.
-   * <p/>
-   * Since the list is fetched in batches, it does not represent a
-   * consistent snapshot of the entire list of encryption zones.
-   * <p/>
-   * This method can only be called by HDFS superusers.
-   */
-  public RemoteIterator<EncryptionZone> listEncryptionZones()
-      throws IOException {
-    return dfs.listEncryptionZones();
-  }
-
-  /**
-   * Exposes a stream of namesystem events. Only events occurring after the
-   * stream is created are available.
-   * See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
-   * for information on stream usage.
-   * See {@link org.apache.hadoop.hdfs.inotify.Event}
-   * for information on the available events.
-   * <p/>
-   * Inotify users may want to tune the following HDFS parameters to
-   * ensure that enough extra HDFS edits are saved to support inotify clients
-   * that fall behind the current state of the namespace while reading events.
-   * The default parameter values should generally be reasonable. If edits are
-   * deleted before their corresponding events can be read, clients will see a
-   * {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
-   * {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
-   *
-   * It should generally be sufficient to tune these parameters:
-   * dfs.namenode.num.extra.edits.retained
-   * dfs.namenode.max.extra.edits.segments.retained
-   *
-   * Parameters that affect the number of created segments and the number of
-   * edits that are considered necessary, i.e. do not count towards the
-   * dfs.namenode.num.extra.edits.retained quota):
-   * dfs.namenode.checkpoint.period
-   * dfs.namenode.checkpoint.txns
-   * dfs.namenode.num.checkpoints.retained
-   * dfs.ha.log-roll.period
-   * <p/>
-   * It is recommended that local journaling be configured
-   * (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
-   * so that edit transfers from the shared journal can be avoided.
-   *
-   * @throws IOException If there was an error obtaining the stream.
-   */
-  public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
-    return dfs.getInotifyEventStream();
-  }
-
-  /**
-   * A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
-   * users who are aware of HDFS edits up to lastReadTxid (e.g. because they
-   * have access to an FSImage inclusive of lastReadTxid) and only want to read
-   * events after this point.
-   */
-  public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
-      throws IOException {
-    return dfs.getInotifyEventStream(lastReadTxid);
-  }
-
-  /**
-   * Set the source path to the specified storage policy.
-   *
-   * @param src The source path referring to either a directory or a file.
-   * @param policyName The name of the storage policy.
-   */
-  public void setStoragePolicy(final Path src, final String policyName)
-      throws IOException {
-    dfs.setStoragePolicy(src, policyName);
-  }
-
-  /**
-   * Unset the storage policy set for a given file or directory.
-   *
-   * @param src file or directory path.
-   * @throws IOException
-   */
-  public void unsetStoragePolicy(final Path src) throws IOException {
-    dfs.unsetStoragePolicy(src);
-  }
-
-  /**
-   * Query the effective storage policy ID for the given file or directory.
-   *
-   * @param src file or directory path.
-   * @return storage policy for the given file or directory.
-   * @throws IOException
-   */
-  public BlockStoragePolicySpi getStoragePolicy(final Path src)
-      throws IOException {
-    return dfs.getStoragePolicy(src);
-  }
-
-  /**
-   * Retrieve all the storage policies supported by HDFS file system.
-   *
-   * @return all storage policies supported by HDFS file system.
-   * @throws IOException
-   */
-  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
-      throws IOException {
-    return dfs.getAllStoragePolicies();
-  }
-
-  /**
-   * Set the source path to the specified erasure coding policy.
-   *
-   * @param path The source path referring to a directory.
-   * @param ecPolicy The erasure coding policy for the directory.
-   *                 If null, the default will be used.
-   * @throws IOException
-   */
-  public void setErasureCodingPolicy(final Path path,
-      final ErasureCodingPolicy ecPolicy) throws IOException {
-    dfs.setErasureCodingPolicy(path, ecPolicy);
-  }
-
-  /**
-   * Get the erasure coding policy information for the specified path
-   *
-   * @param path
-   * @return Returns the policy information if file or directory on the path is
-   *          erasure coded. Null otherwise.
-   * @throws IOException
-   */
-  public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
-      throws IOException {
-    return dfs.getErasureCodingPolicy(path);
-  }
-
-  /**
-   * Get the Erasure coding policies supported.
-   *
-   * @throws IOException
-   */
-  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
-    return dfs.getClient().getErasureCodingPolicies();
-  }
-
-  private void provisionEZTrash(Path path) throws IOException {
-    // make sure the path is an EZ
-    EncryptionZone ez = dfs.getEZForPath(path);
-    if (ez == null) {
-      throw new IllegalArgumentException(path + " is not an encryption zone.");
-    }
-
-    String ezPath = ez.getPath();
-    if (!path.toString().equals(ezPath)) {
-      throw new IllegalArgumentException(path + " is not the root of an " +
-          "encryption zone. Do you mean " + ez.getPath() + "?");
-    }
-
-    // check if the trash directory exists
-
-    Path trashPath = new Path(ez.getPath(), FileSystem.TRASH_PREFIX);
-
-    if (dfs.exists(trashPath)) {
-      String errMessage = "Will not provision new trash directory for " +
-          "encryption zone " + ez.getPath() + ". Path already exists.";
-      FileStatus trashFileStatus = dfs.getFileStatus(trashPath);
-      if (!trashFileStatus.isDirectory()) {
-        errMessage += "\r\n" +
-            "Warning: " + trashPath.toString() + " is not a directory";
-      }
-      if (!trashFileStatus.getPermission().equals(TRASH_PERMISSION)) {
-        errMessage += "\r\n" +
-            "Warning: the permission of " +
-            trashPath.toString() + " is not " + TRASH_PERMISSION;
-      }
-      throw new IOException(errMessage);
-    }
-
-    // Update the permission bits
-    dfs.mkdir(trashPath, TRASH_PERMISSION);
-    dfs.setPermission(trashPath, TRASH_PERMISSION);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
deleted file mode 100644
index f87de97..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.client;
-
-import java.io.IOException;
-import java.net.URI;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.io.IOUtils;
-
-/**
- * The public utility API for HDFS.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class HdfsUtils {
-  private static final Log LOG = LogFactory.getLog(HdfsUtils.class);
-
-  /**
-   * Is the HDFS healthy?
-   * HDFS is considered as healthy if it is up and not in safemode.
-   *
-   * @param uri the HDFS URI.  Note that the URI path is ignored.
-   * @return true if HDFS is healthy; false, otherwise.
-   */
-  public static boolean isHealthy(URI uri) {
-    //check scheme
-    final String scheme = uri.getScheme();
-    if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(scheme)) {
-      throw new IllegalArgumentException("The scheme is not "
-          + HdfsConstants.HDFS_URI_SCHEME + ", uri=" + uri);
-    }
-    
-    final Configuration conf = new Configuration();
-    //disable FileSystem cache
-    conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
-    //disable client retry for rpc connection and rpc calls
-    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, false);
-    conf.setInt(
-        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
-
-    DistributedFileSystem fs = null;
-    try {
-      fs = (DistributedFileSystem)FileSystem.get(uri, conf);
-      final boolean safemode = fs.setSafeMode(SafeModeAction.SAFEMODE_GET);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Is namenode in safemode? " + safemode + "; uri=" + uri);
-      }
-
-      fs.close();
-      fs = null;
-      return !safemode;
-    } catch(IOException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Got an exception for uri=" + uri, e);
-      }
-      return false;
-    } finally {
-      IOUtils.cleanup(LOG, fs);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
deleted file mode 100644
index 95eceb7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package provides the administrative APIs for HDFS.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-package org.apache.hadoop.hdfs.client;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

