http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
deleted file mode 100644
index d014a1f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ /dev/null
@@ -1,714 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.client.impl;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.Options.ChecksumOpt;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.BlockReaderFactory;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.util.ByteArrayManager;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.util.DataChecksum;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * DFSClient configuration 
- */
-public class DfsClientConf {
-
-  private final int hdfsTimeout;    // timeout value for a DFS operation.
-
-  private final int maxFailoverAttempts;
-  private final int maxRetryAttempts;
-  private final int failoverSleepBaseMillis;
-  private final int failoverSleepMaxMillis;
-  private final int maxBlockAcquireFailures;
-  private final int datanodeSocketWriteTimeout;
-  private final int ioBufferSize;
-  private final ChecksumOpt defaultChecksumOpt;
-  private final int writePacketSize;
-  private final int writeMaxPackets;
-  private final ByteArrayManager.Conf writeByteArrayManagerConf;
-  private final int socketTimeout;
-  private final long excludedNodesCacheExpiry;
-  /** Wait time window (in msec) if BlockMissingException is caught */
-  private final int timeWindow;
-  private final int numCachedConnRetry;
-  private final int numBlockWriteRetry;
-  private final int numBlockWriteLocateFollowingRetry;
-  private final int blockWriteLocateFollowingInitialDelayMs;
-  private final long defaultBlockSize;
-  private final long prefetchSize;
-  private final short defaultReplication;
-  private final String taskId;
-  private final FsPermission uMask;
-  private final boolean connectToDnViaHostname;
-  private final int retryTimesForGetLastBlockLength;
-  private final int retryIntervalForGetLastBlockLength;
-  private final long datanodeRestartTimeout;
-  private final long slowIoWarningThresholdMs;
-
-  private final ShortCircuitConf shortCircuitConf;
-  
-  private final long hedgedReadThresholdMillis;
-  private final int hedgedReadThreadpoolSize;
-
-  public DfsClientConf(Configuration conf) {
-    // The hdfsTimeout is currently the same as the ipc timeout 
-    hdfsTimeout = Client.getTimeout(conf);
-
-    maxRetryAttempts = conf.getInt(
-        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
-        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
-    timeWindow = conf.getInt(
-        HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY,
-        HdfsClientConfigKeys.Retry.WINDOW_BASE_DEFAULT);
-    retryTimesForGetLastBlockLength = conf.getInt(
-        HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY,
-        HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
-    retryIntervalForGetLastBlockLength = conf.getInt(
-        HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY,
-        HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
-
-    maxFailoverAttempts = conf.getInt(
-        HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
-        HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
-    failoverSleepBaseMillis = conf.getInt(
-        HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
-        HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
-    failoverSleepMaxMillis = conf.getInt(
-        HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
-        HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
-
-    maxBlockAcquireFailures = conf.getInt(
-        DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
-        DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
-    datanodeSocketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
-        HdfsServerConstants.WRITE_TIMEOUT);
-    ioBufferSize = conf.getInt(
-        CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
-        CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
-    defaultChecksumOpt = getChecksumOptFromConf(conf);
-    socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
-        HdfsServerConstants.READ_TIMEOUT);
-    /** dfs.write.packet.size is an internal config variable */
-    writePacketSize = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
-        DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
-    writeMaxPackets = conf.getInt(
-        HdfsClientConfigKeys.Write.MAX_PACKETS_IN_FLIGHT_KEY,
-        HdfsClientConfigKeys.Write.MAX_PACKETS_IN_FLIGHT_DEFAULT);
-    
-    final boolean byteArrayManagerEnabled = conf.getBoolean(
-        HdfsClientConfigKeys.Write.ByteArrayManager.ENABLED_KEY,
-        HdfsClientConfigKeys.Write.ByteArrayManager.ENABLED_DEFAULT);
-    if (!byteArrayManagerEnabled) {
-      writeByteArrayManagerConf = null;
-    } else {
-      final int countThreshold = conf.getInt(
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_THRESHOLD_KEY,
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_THRESHOLD_DEFAULT);
-      final int countLimit = conf.getInt(
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_LIMIT_KEY,
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_LIMIT_DEFAULT);
-      final long countResetTimePeriodMs = conf.getLong(
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_KEY,
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_DEFAULT);
-      writeByteArrayManagerConf = new ByteArrayManager.Conf(
-          countThreshold, countLimit, countResetTimePeriodMs); 
-    }
-    
-    defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
-        DFS_BLOCK_SIZE_DEFAULT);
-    defaultReplication = (short) conf.getInt(
-        DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
-    taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
-    excludedNodesCacheExpiry = conf.getLong(
-        HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY,
-        HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
-    prefetchSize = conf.getLong(HdfsClientConfigKeys.Read.PREFETCH_SIZE_KEY,
-        10 * defaultBlockSize);
-    numCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
-        DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
-    numBlockWriteRetry = conf.getInt(
-        HdfsClientConfigKeys.BlockWrite.RETRIES_KEY,
-        HdfsClientConfigKeys.BlockWrite.RETRIES_DEFAULT);
-    numBlockWriteLocateFollowingRetry = conf.getInt(
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
-    blockWriteLocateFollowingInitialDelayMs = conf.getInt(
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_DEFAULT);
-    uMask = FsPermission.getUMask(conf);
-    connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
-        DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
-
-    datanodeRestartTimeout = conf.getLong(
-        DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY,
-        DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT) * 1000;
-    slowIoWarningThresholdMs = conf.getLong(
-        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
-        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
-    
-    shortCircuitConf = new ShortCircuitConf(conf);
-
-    hedgedReadThresholdMillis = conf.getLong(
-        HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY,
-        HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_DEFAULT);
-    hedgedReadThreadpoolSize = conf.getInt(
-        HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
-        HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_DEFAULT);
-  }
-
-  private DataChecksum.Type getChecksumType(Configuration conf) {
-    final String checksum = conf.get(
-        DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
-        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
-    try {
-      return DataChecksum.Type.valueOf(checksum);
-    } catch(IllegalArgumentException iae) {
-      DFSClient.LOG.warn("Bad checksum type: " + checksum + ". Using default "
-          + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
-      return DataChecksum.Type.valueOf(
-          DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT); 
-    }
-  }
-
-  // Construct a checksum option from conf
-  private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
-    DataChecksum.Type type = getChecksumType(conf);
-    int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
-        DFS_BYTES_PER_CHECKSUM_DEFAULT);
-    return new ChecksumOpt(type, bytesPerChecksum);
-  }
-
-  /** create a DataChecksum with the given option. */
-  public DataChecksum createChecksum(ChecksumOpt userOpt) {
-    // Fill in any missing field with the default.
-    ChecksumOpt opt = ChecksumOpt.processChecksumOpt(
-        defaultChecksumOpt, userOpt);
-    DataChecksum dataChecksum = DataChecksum.newDataChecksum(
-        opt.getChecksumType(),
-        opt.getBytesPerChecksum());
-    if (dataChecksum == null) {
-      throw new HadoopIllegalArgumentException("Invalid checksum type: userOpt="
-          + userOpt + ", default=" + defaultChecksumOpt
-          + ", effective=null");
-    }
-    return dataChecksum;
-  }
-
-  @VisibleForTesting
-  public int getBlockWriteLocateFollowingInitialDelayMs() {
-    return blockWriteLocateFollowingInitialDelayMs;
-  }
-
-  /**
-   * @return the hdfsTimeout
-   */
-  public int getHdfsTimeout() {
-    return hdfsTimeout;
-  }
-
-  /**
-   * @return the maxFailoverAttempts
-   */
-  public int getMaxFailoverAttempts() {
-    return maxFailoverAttempts;
-  }
-
-  /**
-   * @return the maxRetryAttempts
-   */
-  public int getMaxRetryAttempts() {
-    return maxRetryAttempts;
-  }
-
-  /**
-   * @return the failoverSleepBaseMillis
-   */
-  public int getFailoverSleepBaseMillis() {
-    return failoverSleepBaseMillis;
-  }
-
-  /**
-   * @return the failoverSleepMaxMillis
-   */
-  public int getFailoverSleepMaxMillis() {
-    return failoverSleepMaxMillis;
-  }
-
-  /**
-   * @return the maxBlockAcquireFailures
-   */
-  public int getMaxBlockAcquireFailures() {
-    return maxBlockAcquireFailures;
-  }
-
-  /**
-   * @return the datanodeSocketWriteTimeout
-   */
-  public int getDatanodeSocketWriteTimeout() {
-    return datanodeSocketWriteTimeout;
-  }
-
-  /**
-   * @return the ioBufferSize
-   */
-  public int getIoBufferSize() {
-    return ioBufferSize;
-  }
-
-  /**
-   * @return the defaultChecksumOpt
-   */
-  public ChecksumOpt getDefaultChecksumOpt() {
-    return defaultChecksumOpt;
-  }
-
-  /**
-   * @return the writePacketSize
-   */
-  public int getWritePacketSize() {
-    return writePacketSize;
-  }
-
-  /**
-   * @return the writeMaxPackets
-   */
-  public int getWriteMaxPackets() {
-    return writeMaxPackets;
-  }
-
-  /**
-   * @return the writeByteArrayManagerConf
-   */
-  public ByteArrayManager.Conf getWriteByteArrayManagerConf() {
-    return writeByteArrayManagerConf;
-  }
-
-  /**
-   * @return the socketTimeout
-   */
-  public int getSocketTimeout() {
-    return socketTimeout;
-  }
-
-  /**
-   * @return the excludedNodesCacheExpiry
-   */
-  public long getExcludedNodesCacheExpiry() {
-    return excludedNodesCacheExpiry;
-  }
-
-  /**
-   * @return the timeWindow
-   */
-  public int getTimeWindow() {
-    return timeWindow;
-  }
-
-  /**
-   * @return the numCachedConnRetry
-   */
-  public int getNumCachedConnRetry() {
-    return numCachedConnRetry;
-  }
-
-  /**
-   * @return the numBlockWriteRetry
-   */
-  public int getNumBlockWriteRetry() {
-    return numBlockWriteRetry;
-  }
-
-  /**
-   * @return the numBlockWriteLocateFollowingRetry
-   */
-  public int getNumBlockWriteLocateFollowingRetry() {
-    return numBlockWriteLocateFollowingRetry;
-  }
-
-  /**
-   * @return the defaultBlockSize
-   */
-  public long getDefaultBlockSize() {
-    return defaultBlockSize;
-  }
-
-  /**
-   * @return the prefetchSize
-   */
-  public long getPrefetchSize() {
-    return prefetchSize;
-  }
-
-  /**
-   * @return the defaultReplication
-   */
-  public short getDefaultReplication() {
-    return defaultReplication;
-  }
-
-  /**
-   * @return the taskId
-   */
-  public String getTaskId() {
-    return taskId;
-  }
-
-  /**
-   * @return the uMask
-   */
-  public FsPermission getUMask() {
-    return uMask;
-  }
-
-  /**
-   * @return the connectToDnViaHostname
-   */
-  public boolean isConnectToDnViaHostname() {
-    return connectToDnViaHostname;
-  }
-
-  /**
-   * @return the retryTimesForGetLastBlockLength
-   */
-  public int getRetryTimesForGetLastBlockLength() {
-    return retryTimesForGetLastBlockLength;
-  }
-
-  /**
-   * @return the retryIntervalForGetLastBlockLength
-   */
-  public int getRetryIntervalForGetLastBlockLength() {
-    return retryIntervalForGetLastBlockLength;
-  }
-
-  /**
-   * @return the datanodeRestartTimeout
-   */
-  public long getDatanodeRestartTimeout() {
-    return datanodeRestartTimeout;
-  }
-
-  /**
-   * @return the slowIoWarningThresholdMs
-   */
-  public long getSlowIoWarningThresholdMs() {
-    return slowIoWarningThresholdMs;
-  }
-
-  /**
-   * @return the hedgedReadThresholdMillis
-   */
-  public long getHedgedReadThresholdMillis() {
-    return hedgedReadThresholdMillis;
-  }
-
-  /**
-   * @return the hedgedReadThreadpoolSize
-   */
-  public int getHedgedReadThreadpoolSize() {
-    return hedgedReadThreadpoolSize;
-  }
-
-  /**
-   * @return the shortCircuitConf
-   */
-  public ShortCircuitConf getShortCircuitConf() {
-    return shortCircuitConf;
-  }
-
-  public static class ShortCircuitConf {
-    private static final Log LOG = LogFactory.getLog(ShortCircuitConf.class);
-
-    private final int socketCacheCapacity;
-    private final long socketCacheExpiry;
-
-    private final boolean useLegacyBlockReader;
-    private final boolean useLegacyBlockReaderLocal;
-    private final String domainSocketPath;
-    private final boolean skipShortCircuitChecksums;
-
-    private final int shortCircuitBufferSize;
-    private final boolean shortCircuitLocalReads;
-    private final boolean domainSocketDataTraffic;
-    private final int shortCircuitStreamsCacheSize;
-    private final long shortCircuitStreamsCacheExpiryMs; 
-    private final int shortCircuitSharedMemoryWatcherInterruptCheckMs;
-    
-    private final boolean shortCircuitMmapEnabled;
-    private final int shortCircuitMmapCacheSize;
-    private final long shortCircuitMmapCacheExpiryMs;
-    private final long shortCircuitMmapCacheRetryTimeout;
-    private final long shortCircuitCacheStaleThresholdMs;
-
-    private final long keyProviderCacheExpiryMs;
-
-    @VisibleForTesting
-    public BlockReaderFactory.FailureInjector brfFailureInjector =
-        new BlockReaderFactory.FailureInjector();
-
-    public ShortCircuitConf(Configuration conf) {
-      socketCacheCapacity = conf.getInt(
-          DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
-          DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
-      socketCacheExpiry = conf.getLong(
-          DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
-          DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT);
-
-      useLegacyBlockReader = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER,
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
-      useLegacyBlockReaderLocal = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
-      shortCircuitLocalReads = conf.getBoolean(
-          HdfsClientConfigKeys.Read.ShortCircuit.KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT);
-      domainSocketDataTraffic = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
-          DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
-      domainSocketPath = conf.getTrimmed(
-          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
-          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
-            + " = " + useLegacyBlockReaderLocal);
-        LOG.debug(HdfsClientConfigKeys.Read.ShortCircuit.KEY
-            + " = " + shortCircuitLocalReads);
-        LOG.debug(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
-            + " = " + domainSocketDataTraffic);
-        LOG.debug(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY
-            + " = " + domainSocketPath);
-      }
-
-      skipShortCircuitChecksums = conf.getBoolean(
-          HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_DEFAULT);
-      shortCircuitBufferSize = conf.getInt(
-          HdfsClientConfigKeys.Read.ShortCircuit.BUFFER_SIZE_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.BUFFER_SIZE_DEFAULT);
-      shortCircuitStreamsCacheSize = conf.getInt(
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_DEFAULT);
-      shortCircuitStreamsCacheExpiryMs = conf.getLong(
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_DEFAULT);
-      shortCircuitMmapEnabled = conf.getBoolean(
-          HdfsClientConfigKeys.Mmap.ENABLED_KEY,
-          HdfsClientConfigKeys.Mmap.ENABLED_DEFAULT);
-      shortCircuitMmapCacheSize = conf.getInt(
-          HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY,
-          HdfsClientConfigKeys.Mmap.CACHE_SIZE_DEFAULT);
-      shortCircuitMmapCacheExpiryMs = conf.getLong(
-          HdfsClientConfigKeys.Mmap.CACHE_TIMEOUT_MS_KEY,
-          HdfsClientConfigKeys.Mmap.CACHE_TIMEOUT_MS_DEFAULT);
-      shortCircuitMmapCacheRetryTimeout = conf.getLong(
-          HdfsClientConfigKeys.Mmap.RETRY_TIMEOUT_MS_KEY,
-          HdfsClientConfigKeys.Mmap.RETRY_TIMEOUT_MS_DEFAULT);
-      shortCircuitCacheStaleThresholdMs = conf.getLong(
-          HdfsClientConfigKeys.ShortCircuit.REPLICA_STALE_THRESHOLD_MS_KEY,
-          HdfsClientConfigKeys.ShortCircuit.REPLICA_STALE_THRESHOLD_MS_DEFAULT);
-      shortCircuitSharedMemoryWatcherInterruptCheckMs = conf.getInt(
-          DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
-          DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
-
-      keyProviderCacheExpiryMs = conf.getLong(
-          DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS,
-          DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT);
-    }
-
-    /**
-     * @return the socketCacheCapacity
-     */
-    public int getSocketCacheCapacity() {
-      return socketCacheCapacity;
-    }
-
-    /**
-     * @return the socketCacheExpiry
-     */
-    public long getSocketCacheExpiry() {
-      return socketCacheExpiry;
-    }
-
-    public boolean isUseLegacyBlockReaderLocal() {
-      return useLegacyBlockReaderLocal;
-    }
-
-    public String getDomainSocketPath() {
-      return domainSocketPath;
-    }
-
-    public boolean isShortCircuitLocalReads() {
-      return shortCircuitLocalReads;
-    }
-
-    public boolean isDomainSocketDataTraffic() {
-      return domainSocketDataTraffic;
-    }
-    /**
-     * @return the useLegacyBlockReader
-     */
-    public boolean isUseLegacyBlockReader() {
-      return useLegacyBlockReader;
-    }
-
-    /**
-     * @return the skipShortCircuitChecksums
-     */
-    public boolean isSkipShortCircuitChecksums() {
-      return skipShortCircuitChecksums;
-    }
-
-    /**
-     * @return the shortCircuitBufferSize
-     */
-    public int getShortCircuitBufferSize() {
-      return shortCircuitBufferSize;
-    }
-
-    /**
-     * @return the shortCircuitStreamsCacheSize
-     */
-    public int getShortCircuitStreamsCacheSize() {
-      return shortCircuitStreamsCacheSize;
-    }
-
-    /**
-     * @return the shortCircuitStreamsCacheExpiryMs
-     */
-    public long getShortCircuitStreamsCacheExpiryMs() {
-      return shortCircuitStreamsCacheExpiryMs;
-    }
-
-    /**
-     * @return the shortCircuitSharedMemoryWatcherInterruptCheckMs
-     */
-    public int getShortCircuitSharedMemoryWatcherInterruptCheckMs() {
-      return shortCircuitSharedMemoryWatcherInterruptCheckMs;
-    }
-
-    /**
-     * @return the shortCircuitMmapEnabled
-     */
-    public boolean isShortCircuitMmapEnabled() {
-      return shortCircuitMmapEnabled;
-    }
-
-    /**
-     * @return the shortCircuitMmapCacheSize
-     */
-    public int getShortCircuitMmapCacheSize() {
-      return shortCircuitMmapCacheSize;
-    }
-
-    /**
-     * @return the shortCircuitMmapCacheExpiryMs
-     */
-    public long getShortCircuitMmapCacheExpiryMs() {
-      return shortCircuitMmapCacheExpiryMs;
-    }
-
-    /**
-     * @return the shortCircuitMmapCacheRetryTimeout
-     */
-    public long getShortCircuitMmapCacheRetryTimeout() {
-      return shortCircuitMmapCacheRetryTimeout;
-    }
-
-    /**
-     * @return the shortCircuitCacheStaleThresholdMs
-     */
-    public long getShortCircuitCacheStaleThresholdMs() {
-      return shortCircuitCacheStaleThresholdMs;
-    }
-
-    /**
-     * @return the keyProviderCacheExpiryMs
-     */
-    public long getKeyProviderCacheExpiryMs() {
-      return keyProviderCacheExpiryMs;
-    }
-
-    public String confAsString() {
-      StringBuilder builder = new StringBuilder();
-      builder.append("shortCircuitStreamsCacheSize = ").
-        append(shortCircuitStreamsCacheSize).
-        append(", shortCircuitStreamsCacheExpiryMs = ").
-        append(shortCircuitStreamsCacheExpiryMs).
-        append(", shortCircuitMmapCacheSize = ").
-        append(shortCircuitMmapCacheSize).
-        append(", shortCircuitMmapCacheExpiryMs = ").
-        append(shortCircuitMmapCacheExpiryMs).
-        append(", shortCircuitMmapCacheRetryTimeout = ").
-        append(shortCircuitMmapCacheRetryTimeout).
-        append(", shortCircuitCacheStaleThresholdMs = ").
-        append(shortCircuitCacheStaleThresholdMs).
-        append(", socketCacheCapacity = ").
-        append(socketCacheCapacity).
-        append(", socketCacheExpiry = ").
-        append(socketCacheExpiry).
-        append(", shortCircuitLocalReads = ").
-        append(shortCircuitLocalReads).
-        append(", useLegacyBlockReaderLocal = ").
-        append(useLegacyBlockReaderLocal).
-        append(", domainSocketDataTraffic = ").
-        append(domainSocketDataTraffic).
-        append(", shortCircuitSharedMemoryWatcherInterruptCheckMs = ").
-        append(shortCircuitSharedMemoryWatcherInterruptCheckMs).
-        append(", keyProviderCacheExpiryMs = ").
-        append(keyProviderCacheExpiryMs);
-
-      return builder.toString();
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index 1a70bd3..a5e22ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
@@ -61,7 +62,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
@@ -316,7 +316,7 @@ public class Dispatcher {
       try {
         sock.connect(
             NetUtils.createSocketAddr(target.getDatanodeInfo().getXferAddr()),
-            HdfsServerConstants.READ_TIMEOUT);
+            HdfsConstants.READ_TIMEOUT);
 
         sock.setKeepAlive(true);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 11194dc..0667bdb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -286,12 +286,6 @@ public interface HdfsServerConstants {
     }
   }
 
-  // Timeouts for communicating with DataNode for streaming writes/reads
-  int READ_TIMEOUT = 60 * 1000;
-  int READ_TIMEOUT_EXTENSION = 5 * 1000;
-  int WRITE_TIMEOUT = 8 * 60 * 1000;
-  int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline
-
   /**
    * Defines the NameNode role.
    */
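
Note: the hunk above deletes these streaming timeout constants from the server-side HdfsServerConstants, while the other hunks in this commit switch every reference to HdfsConstants. The target file is not shown in this mail, so the following is only a sketch of where the declarations presumably land, reusing the values removed above:

  // Sketch, not part of this diff: assumed relocation of the DataNode
  // streaming read/write timeouts into org.apache.hadoop.hdfs.protocol.HdfsConstants.
  int READ_TIMEOUT = 60 * 1000;
  int READ_TIMEOUT_EXTENSION = 5 * 1000;
  int WRITE_TIMEOUT = 8 * 60 * 1000;
  int WRITE_TIMEOUT_EXTENSION = 5 * 1000; // for write pipeline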

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index abc9390..66fd567 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -27,11 +27,11 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHO
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHOLD_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NON_LOCAL_LAZY_PERSIST;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NON_LOCAL_LAZY_PERSIST_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
@@ -55,9 +55,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTI
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.security.SaslPropertiesResolver;
 
 /**
@@ -107,9 +108,9 @@ public class DNConf {
   public DNConf(Configuration conf) {
     this.conf = conf;
     socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
-        HdfsServerConstants.READ_TIMEOUT);
+        HdfsConstants.READ_TIMEOUT);
     socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
-        HdfsServerConstants.WRITE_TIMEOUT);
+        HdfsConstants.WRITE_TIMEOUT);
     socketKeepaliveTimeout = conf.getInt(
         DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
         DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
@@ -149,8 +150,8 @@ public class DNConf {
         DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT);
 
     this.dfsclientSlowIoWarningThresholdMs = conf.getLong(
-        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
-        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
+        HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
+        HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
     this.datanodeSlowIoWarningThresholdMs = conf.getLong(
         DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY,
         DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
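
The DNConf hunks above show the pattern this commit applies throughout: client-facing keys and defaults move from DFSConfigKeys/HdfsServerConstants to HdfsClientConfigKeys/HdfsConstants. A minimal, hypothetical usage sketch against the relocated names (the example class is illustrative and not part of the patch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
  import org.apache.hadoop.hdfs.protocol.HdfsConstants;

  // Illustrative only: resolve the DataNode socket read timeout via the
  // client-side key and default constant referenced by the hunks above.
  public class ReadTimeoutExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      int readTimeout = conf.getInt(
          HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
          HdfsConstants.READ_TIMEOUT);
      System.out.println("effective read timeout (ms) = " + readTimeout);
    }
  }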

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 7757d8b..ecf139c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -143,7 +143,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -914,8 +913,8 @@ public class DataNode extends ReconfigurableBase
 
     if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
               HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT) ||
-        conf.getBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
-              DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT)) {
+        conf.getBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
+              HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT)) {
       DomainPeerServer domainPeerServer =
                 getDomainPeerServer(conf, streamingAddr.getPort());
       if (domainPeerServer != null) {
@@ -936,8 +935,8 @@ public class DataNode extends ReconfigurableBase
     if (domainSocketPath.isEmpty()) {
       if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
             HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT) &&
-         (!conf.getBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
+         (!conf.getBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
+          HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
         LOG.warn("Although short-circuit local reads are configured, " +
             "they are disabled because you didn't configure " +
             DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
@@ -2111,7 +2110,7 @@ public class DataNode extends ReconfigurableBase
         }
 
         long writeTimeout = dnConf.socketWriteTimeout + 
-                            HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
+                            HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
         OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
         InputStream unbufIn = NetUtils.getInputStream(sock);
         DataEncryptionKeyFactory keyFactory =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 089b7cd..e9cf436 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -72,7 +72,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmR
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsUnsupportedException;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsVersionException;
 import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo;
@@ -705,9 +704,9 @@ class DataXceiver extends Receiver implements Runnable {
         mirrorSock = datanode.newSocket();
         try {
           int timeoutValue = dnConf.socketTimeout
-              + (HdfsServerConstants.READ_TIMEOUT_EXTENSION * targets.length);
+              + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
           int writeTimeout = dnConf.socketWriteTimeout + 
-                      (HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
+                      (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
           NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
           mirrorSock.setSoTimeout(timeoutValue);
           mirrorSock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
index c0df244..1436adc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -94,7 +94,7 @@ public class SecureDataNodeStarter implements Daemon {
     InetSocketAddress streamingAddr  = DataNode.getStreamingAddr(conf);
     int socketWriteTimeout = conf.getInt(
         DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
-        HdfsServerConstants.WRITE_TIMEOUT);
+        HdfsConstants.WRITE_TIMEOUT);
 
     ServerSocket ss = (socketWriteTimeout > 0) ? 
         ServerSocketChannel.open().socket() : new ServerSocket();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a7dc54e..320583b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -24,12 +24,12 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 7d4cd7e..2f989d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -71,7 +72,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.net.NetUtils;
@@ -837,7 +837,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
         chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
         targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
       }  catch (IOException ie) {
-        if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
+        if (failures >= HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
           throw new IOException("Could not obtain block " + lblock, ie);
         }
         LOG.info("Could not obtain block from any node:  " + ie);
@@ -873,8 +873,8 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
                 Peer peer = null;
                 Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
                 try {
-                  s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
-                  s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
+                  s.connect(addr, HdfsConstants.READ_TIMEOUT);
+                  s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                   peer = TcpPeerServer.peerFromSocketAndKey(
                         dfs.getSaslDataTransferClient(), s, NamenodeFsck.this,
                         blockToken, datanodeId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
index 7a5b39a..992d8b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
@@ -27,7 +27,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.util.PerformanceAdvisory;
@@ -112,7 +112,7 @@ public class DomainSocketFactory {
     } else {
       if (conf.getDomainSocketPath().isEmpty()) {
         throw new HadoopIllegalArgumentException(feature + " is enabled but "
-            + DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
+            + HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
       } else if (DomainSocket.getLoadingFailureReason() != null) {
         LOG.warn(feature + " cannot be used because "
             + DomainSocket.getLoadingFailureReason());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
deleted file mode 100644
index ea5e39d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
+++ /dev/null
@@ -1,418 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.util;
-
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.Queue;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.util.Time;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Manage byte array creation and release. 
- */
-@InterfaceAudience.Private
-public abstract class ByteArrayManager {
-  static final Log LOG = LogFactory.getLog(ByteArrayManager.class);
-  private static final ThreadLocal<StringBuilder> debugMessage = new ThreadLocal<StringBuilder>() {
-    protected StringBuilder initialValue() {
-      return new StringBuilder();
-    }
-  };
-
-  private static void logDebugMessage() {
-    final StringBuilder b = debugMessage.get();
-    LOG.debug(b);
-    b.setLength(0);
-  }
-
-  static final int MIN_ARRAY_LENGTH = 32;
-  static final byte[] EMPTY_BYTE_ARRAY = {};
-
-  /**
-   * @return the least power of two greater than or equal to n, i.e. return
-   *         the least integer x with x >= n and x a power of two.
-   *
-   * @throws HadoopIllegalArgumentException
-   *           if n <= 0.
-   */
-  public static int leastPowerOfTwo(final int n) {
-    if (n <= 0) {
-      throw new HadoopIllegalArgumentException("n = " + n + " <= 0");
-    }
-
-    final int highestOne = Integer.highestOneBit(n);
-    if (highestOne == n) {
-      return n; // n is a power of two.
-    }
-    final int roundUp = highestOne << 1;
-    if (roundUp < 0) {
-      final long overflow = ((long) highestOne) << 1;
-      throw new ArithmeticException(
-          "Overflow: for n = " + n + ", the least power of two (the least"
-          + " integer x with x >= n and x a power of two) = "
-          + overflow + " > Integer.MAX_VALUE = " + Integer.MAX_VALUE);
-    }
-    return roundUp;
-  }
-
-  /**
-   * A counter with a time stamp so that it is reset automatically
-   * if there is no increment for the time period.
-   */
-  static class Counter {
-    private final long countResetTimePeriodMs;
-    private long count = 0L;
-    private long timestamp = Time.monotonicNow();
-
-    Counter(long countResetTimePeriodMs) {
-      this.countResetTimePeriodMs = countResetTimePeriodMs;
-    }
-
-    synchronized long getCount() {
-      return count;
-    }
-
-    /**
-     * Increment the counter, and reset it if there is no increment
-     * for a certain time period.
-     *
-     * @return the new count.
-     */
-    synchronized long increment() {
-      final long now = Time.monotonicNow();
-      if (now - timestamp > countResetTimePeriodMs) {
-        count = 0; // reset the counter
-      }
-      timestamp = now;
-      return ++count;
-    }
-  }
-
-  /** A map from integers to counters. */
-  static class CounterMap {
-    /** @see ByteArrayManager.Conf#countResetTimePeriodMs */
-    private final long countResetTimePeriodMs;
-    private final Map<Integer, Counter> map = new HashMap<Integer, Counter>();
-
-    private CounterMap(long countResetTimePeriodMs) {
-      this.countResetTimePeriodMs = countResetTimePeriodMs;
-    }
-
-    /**
-     * @return the counter for the given key;
-     *         and create a new counter if it does not exist.
-     */
-    synchronized Counter get(final Integer key, final boolean createIfNotExist) {
-      Counter count = map.get(key);
-      if (count == null && createIfNotExist) {
-        count = new Counter(countResetTimePeriodMs);
-        map.put(key, count);
-      }
-      return count;
-    }
-
-    synchronized void clear() {
-      map.clear();
-    }
-  }
-
-  /** Manage byte arrays with the same fixed length. */
-  static class FixedLengthManager {
-    private final int byteArrayLength;
-    private final int maxAllocated;
-    private final Queue<byte[]> freeQueue = new LinkedList<byte[]>();
-
-    private int numAllocated = 0;
-
-    FixedLengthManager(int arrayLength, int maxAllocated) {
-      this.byteArrayLength = arrayLength;
-      this.maxAllocated = maxAllocated;
-    }
-
-    /**
-     * Allocate a byte array.
-     *
-     * If the number of allocated arrays >= maximum, the current thread is
-     * blocked until the number of allocated arrays drops to below the maximum.
-     * 
-     * The byte array allocated by this method must be returned for recycling
-     * via the {@link FixedLengthManager#recycle(byte[])} method.
-     */
-    synchronized byte[] allocate() throws InterruptedException {
-      if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", ").append(this);
-      }
-      for(; numAllocated >= maxAllocated;) {
-        if (LOG.isDebugEnabled()) {
-          debugMessage.get().append(": wait ...");
-          logDebugMessage();
-        }
-
-        wait();
-
-        if (LOG.isDebugEnabled()) {
-          debugMessage.get().append("wake up: ").append(this);
-        }
-      }
-      numAllocated++;
-
-      final byte[] array = freeQueue.poll();
-      if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", recycled? ").append(array != null);
-      }
-      return array != null? array : new byte[byteArrayLength];
-    }
-
-    /**
-     * Recycle the given byte array, which must have the same length as the
-     * array length managed by this object.
-     *
-     * The byte array may or may not be allocated
-     * by the {@link FixedLengthManager#allocate()} method.
-     */
-    synchronized int recycle(byte[] array) {
-      Preconditions.checkNotNull(array);
-      Preconditions.checkArgument(array.length == byteArrayLength);
-      if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", ").append(this);
-      }
-
-      notify();
-      numAllocated--;
-      if (numAllocated < 0) {
-        // it is possible to drop below 0 since
-        // some byte arrays may not be created by the allocate() method.
-        numAllocated = 0;
-      }
-
-      if (freeQueue.size() < maxAllocated - numAllocated) {
-        if (LOG.isDebugEnabled()) {
-          debugMessage.get().append(", freeQueue.offer");
-        }
-        freeQueue.offer(array);
-      }
-      return freeQueue.size();
-    }
-
-    @Override
-    public synchronized String toString() {
-      return "[" + byteArrayLength + ": " + numAllocated + "/"
-          + maxAllocated + ", free=" + freeQueue.size() + "]";
-    }
-  }
-
-  /** A map from array lengths to byte array managers. */
-  static class ManagerMap {
-    private final int countLimit;
-    private final Map<Integer, FixedLengthManager> map = new HashMap<Integer, FixedLengthManager>();
-
-    ManagerMap(int countLimit) {
-      this.countLimit = countLimit;
-    }
-
-    /** @return the manager for the given array length. */
-    synchronized FixedLengthManager get(final Integer arrayLength,
-        final boolean createIfNotExist) {
-      FixedLengthManager manager = map.get(arrayLength);
-      if (manager == null && createIfNotExist) {
-        manager = new FixedLengthManager(arrayLength, countLimit);
-        map.put(arrayLength, manager);
-      }
-      return manager;
-    }
-
-    synchronized void clear() {
-      map.clear();
-    }
-  }
-
-  public static class Conf {
-    /**
-     * The count threshold for each array length so that a manager is created
-     * only after the allocation count exceeds the threshold.
-     */
-    private final int countThreshold;
-    /**
-     * The maximum number of arrays allowed for each array length.
-     */
-    private final int countLimit;
-    /**
-     * The time period in milliseconds that the allocation count for each array
-     * length is reset to zero if there is no increment.
-     */
-    private final long countResetTimePeriodMs;
-
-    public Conf(int countThreshold, int countLimit, long 
countResetTimePeriodMs) {
-      this.countThreshold = countThreshold;
-      this.countLimit = countLimit;
-      this.countResetTimePeriodMs = countResetTimePeriodMs;
-    }
-  }
-
-  /**
-   * Create a byte array for the given length, where the length of
-   * the returned array is larger than or equal to the given length.
-   *
-   * The current thread may be blocked if some resource is unavailable.
-   * 
-   * The byte array created by this method must be released
-   * via the {@link ByteArrayManager#release(byte[])} method.
-   *
-   * @return a byte array with length larger than or equal to the given length.
-   */
-  public abstract byte[] newByteArray(int size) throws InterruptedException;
-  
-  /**
-   * Release the given byte array.
-   * 
-   * The byte array may or may not be created
-   * by the {@link ByteArrayManager#newByteArray(int)} method.
-   * 
-     * @return the number of free arrays.
-   */
-  public abstract int release(byte[] array);
-
-  public static ByteArrayManager newInstance(Conf conf) {
-    return conf == null? new NewByteArrayWithoutLimit(): new Impl(conf);
-  }
-
-  /**
-   * A dummy implementation which simply calls new byte[].
-   */
-  static class NewByteArrayWithoutLimit extends ByteArrayManager {
-    @Override
-    public byte[] newByteArray(int size) throws InterruptedException {
-      return new byte[size];
-    }
-    
-    @Override
-    public int release(byte[] array) {
-      return 0;
-    }
-  }
-
-  /**
-   * Manage byte array allocation and provide a mechanism for recycling the 
byte
-   * array objects.
-   */
-  static class Impl extends ByteArrayManager {
-    private final Conf conf;
-  
-    private final CounterMap counters;
-    private final ManagerMap managers;
-  
-    Impl(Conf conf) {
-      this.conf = conf;
-      this.counters = new CounterMap(conf.countResetTimePeriodMs);
-      this.managers = new ManagerMap(conf.countLimit);
-    }
-  
-    /**
-     * Allocate a byte array, where the length of the allocated array
-     * is the least power of two that is greater than or equal to the given
-     * length, unless the given length is less than {@link #MIN_ARRAY_LENGTH},
-     * in which case the returned array length is {@link #MIN_ARRAY_LENGTH}.
-     *
-     * If the number of allocated arrays exceeds the capacity,
-     * the current thread is blocked until
-     * the number of allocated arrays drops below the capacity.
-     * 
-     * The byte array allocated by this method must be returned for recycling
-     * via the {@link Impl#release(byte[])} method.
-     *
-     * @return a byte array with length larger than or equal to the given 
length.
-     */
-    @Override
-    public byte[] newByteArray(final int arrayLength) throws 
InterruptedException {
-      Preconditions.checkArgument(arrayLength >= 0);
-      if (LOG.isDebugEnabled()) {
-        debugMessage.get().append("allocate(").append(arrayLength).append(")");
-      }
-  
-      final byte[] array;
-      if (arrayLength == 0) {
-        array = EMPTY_BYTE_ARRAY;
-      } else {
-        final int powerOfTwo = arrayLength <= MIN_ARRAY_LENGTH?
-            MIN_ARRAY_LENGTH: leastPowerOfTwo(arrayLength);
-        final long count = counters.get(powerOfTwo, true).increment();
-        final boolean aboveThreshold = count > conf.countThreshold;
-        // create a new manager only if the count is above threshold.
-        final FixedLengthManager manager = managers.get(powerOfTwo, 
aboveThreshold);
-  
-        if (LOG.isDebugEnabled()) {
-          debugMessage.get().append(": count=").append(count)
-              .append(aboveThreshold? ", aboveThreshold": ", belowThreshold");
-        }
-        array = manager != null? manager.allocate(): new byte[powerOfTwo];
-      }
-  
-      if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", return 
byte[").append(array.length).append("]");
-        logDebugMessage();
-      }
-      return array;
-    }
-  
-    /**
-     * Recycle the given byte array.
-     * 
-     * The byte array may or may not be allocated
-     * by the {@link Impl#newByteArray(int)} method.
-     * 
-     * This is a non-blocking call.
-     */
-    @Override
-    public int release(final byte[] array) {
-      Preconditions.checkNotNull(array);
-      if (LOG.isDebugEnabled()) {
-        debugMessage.get().append("recycle: 
array.length=").append(array.length);
-      }
-  
-      final int freeQueueSize;
-      if (array.length == 0) {
-        freeQueueSize = -1;
-      } else {
-        final FixedLengthManager manager = managers.get(array.length, false);
-        freeQueueSize = manager == null? -1: manager.recycle(array);
-      }
-  
-      if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", freeQueueSize=").append(freeQueueSize);
-        logDebugMessage();
-      }
-      return freeQueueSize;
-    }
-  
-    CounterMap getCounters() {
-      return counters;
-    }
-  
-    ManagerMap getManagers() {
-      return managers;
-    }
-  }
-}
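
For reference, a minimal usage sketch of the ByteArrayManager API deleted
above, written only against the methods visible in this diff (newInstance(Conf),
newByteArray(int), release(byte[])). The import path, caller class name, and
all numeric Conf values are assumptions for illustration, not values taken
from the project.

    // Minimal sketch against the ByteArrayManager API shown above.
    // Assumptions: the import path and all numeric Conf values are illustrative.
    import org.apache.hadoop.hdfs.util.ByteArrayManager;

    public class ByteArrayManagerSketch {
      public static void main(String[] args) throws InterruptedException {
        // Conf(countThreshold, countLimit, countResetTimePeriodMs)
        ByteArrayManager.Conf conf = new ByteArrayManager.Conf(128, 2048, 10000L);
        ByteArrayManager bam = ByteArrayManager.newInstance(conf);

        // newByteArray may block when the per-length allocation limit is reached;
        // the returned array length is rounded up to a power of two
        // (or MIN_ARRAY_LENGTH for small requests).
        byte[] buf = bam.newByteArray(4096);
        try {
          // ... use buf ...
        } finally {
          // Arrays obtained from newByteArray should be handed back for recycling.
          bam.release(buf);
        }
      }
    }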

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
index ddeb57d..7cc7898 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fi.FiTestUtil;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -226,11 +227,11 @@ public class TestFiPipelines {
     conf = new Configuration();
     int customPerChecksumSize = 700;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100);
-    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 
customPerChecksumSize);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100);
+    conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 
customPerChecksumSize);
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 
customBlockSize / 2);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 
customBlockSize / 2);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
   }
 
   private static void initLoggers() {
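
The same substitution recurs throughout the test hunks below: keys that
configure client behaviour are now read from HdfsClientConfigKeys instead of
DFSConfigKeys. A minimal sketch of what calling code looks like after the
change; the class name and numeric values here are illustrative examples, not
project defaults.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class ClientConfSketch {
      // Builds a Configuration using the relocated client-side keys; the values
      // are arbitrary examples, not the shipped defaults.
      public static Configuration newClientConf() {
        Configuration conf = new Configuration();
        conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 64 * 1024);
        conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 60000);
        return conf;
      }
    }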

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
index fcad32e..7a9a76f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.log4j.Level;
@@ -52,7 +53,7 @@ public class TestFiDataTransferProtocol {
   static {
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
   }
 
   static private FSDataOutputStream createFile(FileSystem fs, Path p

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
index 5832bf0..deda317 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.log4j.Level;
 import org.junit.Assert;
@@ -57,8 +58,8 @@ public class TestFiDataTransferProtocol2 {
   static {
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 
PACKET_SIZE);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
   }
 
   static final byte[] bytes = new byte[MAX_N_PACKET * PACKET_SIZE];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
index e4380c7..3455f55 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
@@ -337,7 +337,7 @@ public class TestEnhancedByteBufferAccess {
     ByteBuffer results[] = { null, null, null, null };
 
     DistributedFileSystem fs = null;
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
@@ -599,7 +599,7 @@ public class TestEnhancedByteBufferAccess {
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
         false);
     final String CONTEXT = "testZeroCopyReadOfCachedData";
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
     conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
         DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH,
           (int) 
NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize()));
@@ -722,7 +722,7 @@ public class TestEnhancedByteBufferAccess {
     final String CONTEXT = "testClientMmapDisable";
     FSDataInputStream fsIn = null;
     DistributedFileSystem fs = null;
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
 
     try {
       // With HdfsClientConfigKeys.Mmap.ENABLED_KEY set to false,
@@ -753,7 +753,7 @@ public class TestEnhancedByteBufferAccess {
       // Now try again with HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY == 0.
       conf.setBoolean(HdfsClientConfigKeys.Mmap.ENABLED_KEY, true);
       conf.setInt(HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY, 0);
-      conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
+      conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
@@ -784,7 +784,7 @@ public class TestEnhancedByteBufferAccess {
     MiniDFSCluster cluster = null;
     final Path TEST_PATH = new Path("/a");
     final String CONTEXT = "test2GBMmapLimit";
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
 
     FSDataInputStream fsIn = null, fsIn2 = null;
     ByteBuffer buf1 = null, buf2 = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
index 7193fe2..e25a754 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -42,7 +41,7 @@ public class TestUnbuffer {
     Configuration conf = new Configuration();
     // Set a new ClientContext.  This way, we will have our own PeerCache,
     // rather than sharing one with other unit tests.
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
         "testUnbufferClosesSocketsContext");
 
     // Disable short-circuit reads.  With short-circuit, we wouldn't hold open 
a
@@ -50,9 +49,9 @@ public class TestUnbuffer {
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
 
     // Set a really long socket timeout to avoid test timing issues.
-    conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
+    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
         100000000L);
-    conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
+    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
         100000000L);
 
     MiniDFSCluster cluster = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
index a1bac18..bed128b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
@@ -22,10 +22,10 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.junit.Assert.assertEquals;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
index 88b7f37..6d8bec3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
@@ -36,10 +36,10 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
@@ -202,8 +202,8 @@ public class BlockReaderTestUtil {
           Socket sock = NetUtils.
               getDefaultSocketFactory(fs.getConf()).createSocket();
           try {
-            sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
-            sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
+            sock.connect(addr, HdfsConstants.READ_TIMEOUT);
+            sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
             peer = TcpPeerServer.peerFromSocket(sock);
           } finally {
             if (peer == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
index 4860244..6b9c138 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -52,9 +53,9 @@ public class FileAppendTest4 {
   private static DistributedFileSystem fs;
 
   private static void init(Configuration conf) {
-    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
+    conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 
BYTES_PER_CHECKSUM);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 
PACKET_SIZE);
   }
   
   @BeforeClass

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
index d8aceff..a8ca9c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
+import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
+import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
 import static org.hamcrest.CoreMatchers.equalTo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
index c69774c..aad670a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
@@ -126,10 +126,10 @@ public class TestBlockReaderLocal {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
         !checksum);
-    conf.setLong(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
+    conf.setLong(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
         BlockReaderLocalTest.BYTES_PER_CHECKSUM);
     conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
-    conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, readahead);
+    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, readahead);
     test.setConfiguration(conf);
     FileInputStream dataIn = null, metaIn = null;
     final Path TEST_PATH = new Path("/a");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
index 760a61f..3deca17 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
@@ -64,12 +64,12 @@ public class TestBlockReaderLocalLegacy {
           getAbsolutePath());
     }
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, 
true);
+    
conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, 
true);
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
         false);
     conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
         UserGroupInformation.getCurrentUser().getShortUserName());
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, 
false);
+    
conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, 
false);
     // Set short retry timeouts so this test runs faster
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     return conf;
@@ -164,7 +164,7 @@ public class TestBlockReaderLocalLegacy {
   public void testBlockReaderLocalLegacyWithAppend() throws Exception {
     final short REPL_FACTOR = 1;
     final HdfsConfiguration conf = getConfiguration(null);
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, 
true);
+    
conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, 
true);
 
     final MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index 91de2b8..ca98441 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -169,7 +169,7 @@ public class TestClientProtocolForPipelineRecovery {
   @Test
   public void testPipelineRecoveryOnOOB() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "15");
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, 
"15");
     MiniDFSCluster cluster = null;
     try {
       int numDataNodes = 1;
@@ -207,7 +207,7 @@ public class TestClientProtocolForPipelineRecovery {
   @Test
   public void testPipelineRecoveryOnRestartFailure() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "5");
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, 
"5");
     MiniDFSCluster cluster = null;
     try {
       int numDataNodes = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
index c518c8c..afa5d27 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
@@ -25,6 +25,7 @@ import java.net.InetSocketAddress;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -89,8 +90,8 @@ public class TestConnCache {
     // instances.  Also use a really long socket timeout so that nothing
     // gets closed before we get around to checking the cache size at the end.
     final String contextName = "testReadFromOneDNContext";
-    configuration.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, contextName);
-    configuration.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
+    configuration.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, contextName);
+    configuration.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
         100000000L);
     BlockReaderTestUtil util = new BlockReaderTestUtil(1, configuration);
     final Path testFile = new Path("/testConnCache.dat");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 441ef9c..5082525 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -177,11 +177,11 @@ public class TestDFSClientRetries {
                                                   InterruptedException { 
     final int writeTimeout = 100; //milliseconds.
     // set a very short write timeout for datanode, so that tests runs fast.
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 
writeTimeout); 
+    conf.setInt(HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 
writeTimeout);
     // set a smaller block size
     final int blockSize = 10*1024*1024;
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1);
+    
conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1);
     // set a small buffer size
     final int bufferSize = 4096;
     conf.setInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, bufferSize);
@@ -643,7 +643,7 @@ public class TestDFSClientRetries {
       DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
       xcievers);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
       retries);
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, timeWin);
     // Disable keepalive

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3aac4758/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
index 26412c8..9d88384 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
@@ -73,7 +73,7 @@ public class TestDFSInputStream {
   @Test(timeout=60000)
   public void testSkipWithRemoteBlockReader() throws IOException {
     Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, 
true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     try {
       testSkipInner(cluster);
