This is an automated email from the ASF dual-hosted git repository.

jinglun pushed a commit to branch HADOOP-19236
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9363fe6251cd0af153cb0f85126f35cb05c35115
Author: lijinglun <lijing...@bytedance.com>
AuthorDate: Tue Oct 29 18:08:28 2024 +0800

    Integration of TOS: Refactor config keys.
---
 .../org/apache/hadoop/fs/tosfs/RawFileSystem.java  | 17 +++--
 .../org/apache/hadoop/fs/tosfs/conf/ConfKeys.java  | 85 +++++++---------------
 .../apache/hadoop/fs/tosfs/conf/FileStoreKeys.java | 12 +--
 .../org/apache/hadoop/fs/tosfs/conf/TosKeys.java   | 37 +++++++---
 .../apache/hadoop/fs/tosfs/object/FileStore.java   | 25 +++----
 .../hadoop/fs/tosfs/object/ObjectOutputStream.java | 30 ++++----
 .../org/apache/hadoop/fs/tosfs/object/tos/TOS.java |  8 +-
 .../apache/hadoop/fs/tosfs/ops/DefaultFsOps.java   |  4 +-
 .../org/apache/hadoop/fs/tosfs/ops/RenameOp.java   | 14 ++--
 .../apache/hadoop/fs/tosfs/TestTosChecksum.java    |  6 +-
 .../hadoop/fs/tosfs/contract/TestChecksum.java     |  2 +-
 .../fs/tosfs/contract/TestGetFileStatus.java       |  5 +-
 .../hadoop/fs/tosfs/contract/TestRename.java       |  6 +-
 .../fs/tosfs/object/TestObjectOutputStream.java    | 14 ++--
 .../fs/tosfs/object/tos/TestTOSObjectStorage.java  |  7 +-
 .../hadoop/fs/tosfs/ops/TestDefaultFsOps.java      |  8 +-
 .../apache/hadoop/fs/tosfs/ops/TestRenameOp.java   |  4 +-
 17 files changed, 132 insertions(+), 152 deletions(-)
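
Summary of the refactor: the ConfKeys constants gain an FS_ prefix; several fixed "fs.tos.*" string keys become scheme-parameterized ArgumentKey templates; and the TOS-specific getFileStatus/checksum keys move from ConfKeys into TosKeys, while the filestore endpoint and batch-delete keys fold into the shared FS_OBJECT_STORAGE_ENDPOINT and FS_BATCH_DELETE_SIZE templates referenced in FileStore.java. ArgumentKey itself is unchanged by this patch and not shown; the sketch below is a hypothetical reconstruction of its contract inferred from usage in this diff (the template constructor and key(scheme) accessor are assumptions, not confirmed against the repository):

// Hypothetical stand-in for org.apache.hadoop.fs.tosfs.conf.ArgumentKey,
// reconstructed from how it is used in this patch; not the actual class.
public final class ArgumentKey {
  private final String template; // e.g. "fs.%s.multipart.size"

  public ArgumentKey(String template) {
    this.template = template;
  }

  // Resolves the per-scheme key, e.g. key("tos") -> "fs.tos.multipart.size".
  public String key(String scheme) {
    return String.format(template, scheme);
  }
}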

diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/RawFileSystem.java b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/RawFileSystem.java
index cd58a752856..8675ca7b774 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/RawFileSystem.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/RawFileSystem.java
@@ -139,8 +139,8 @@ public class RawFileSystem extends FileSystem {
 
     // Parse the range size from the hadoop conf.
     long rangeSize = getConf().getLong(
-        ConfKeys.OBJECT_STREAM_RANGE_SIZE,
-        ConfKeys.OBJECT_STREAM_RANGE_SIZE_DEFAULT);
+        ConfKeys.FS_OBJECT_STREAM_RANGE_SIZE,
+        ConfKeys.FS_OBJECT_STREAM_RANGE_SIZE_DEFAULT);
     Preconditions.checkArgument(rangeSize > 0, "Object storage range size must be positive.");
 
     FSInputStream fsIn = new ObjectMultiRangeInputStream(taskThreadPool, storage, path,
@@ -602,12 +602,13 @@ public class RawFileSystem extends FileSystem {
       throw new FileNotFoundException(String.format("Bucket: %s not found.", 
uri.getAuthority()));
     }
 
-    int taskThreadPoolSize =
-        getConf().getInt(ConfKeys.TASK_THREAD_POOL_SIZE, ConfKeys.TASK_THREAD_POOL_SIZE_DEFAULT);
+    int taskThreadPoolSize = getConf().getInt(ConfKeys.FS_TASK_THREAD_POOL_SIZE.key(storage.scheme()),
+        ConfKeys.FS_TASK_THREAD_POOL_SIZE_DEFAULT);
     this.taskThreadPool = ThreadPools.newWorkerPool(TASK_THREAD_POOL_PREFIX, taskThreadPoolSize);
 
-    int uploadThreadPoolSize = getConf().getInt(ConfKeys.MULTIPART_THREAD_POOL_SIZE,
-        ConfKeys.MULTIPART_THREAD_POOL_SIZE_DEFAULT);
+    int uploadThreadPoolSize =
+        getConf().getInt(ConfKeys.FS_MULTIPART_THREAD_POOL_SIZE.key(storage.scheme()),
+            ConfKeys.FS_MULTIPART_THREAD_POOL_SIZE_DEFAULT);
     this.uploadThreadPool = ThreadPools.newWorkerPool(MULTIPART_THREAD_POOL_PREFIX, uploadThreadPoolSize);
 
     if (storage.bucket().isDirectory()) {
@@ -647,8 +648,8 @@ public class RawFileSystem extends FileSystem {
       // Compatible with HDFS
       throw new FileNotFoundException(String.format("Path is not a file, %s", 
f));
     }
-    if (!getConf().getBoolean(ConfKeys.CHECKSUM_ENABLED.key(storage.scheme()),
-        ConfKeys.CHECKSUM_ENABLED_DEFAULT)) {
+    if (!getConf().getBoolean(ConfKeys.FS_CHECKSUM_ENABLED.key(storage.scheme()),
+        ConfKeys.FS_CHECKSUM_ENABLED_DEFAULT)) {
       return null;
     }
 
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/ConfKeys.java b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/ConfKeys.java
index 4e354ed99a2..517f308c91f 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/ConfKeys.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/ConfKeys.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.fs.tosfs.conf;
 
-import org.apache.hadoop.fs.tosfs.object.ChecksumType;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 
 public class ConfKeys {
@@ -53,56 +52,51 @@ public class ConfKeys {
   /**
    * The multipart upload part size of the given object storage, e.g. fs.tos.multipart.size.
    */
-  public static final ArgumentKey MULTIPART_SIZE = new ArgumentKey("fs.%s.multipart.size");
-  public static final long MULTIPART_SIZE_DEFAULT = 8L << 20;
+  public static final ArgumentKey FS_MULTIPART_SIZE = new ArgumentKey("fs.%s.multipart.size");
+  public static final long FS_MULTIPART_SIZE_DEFAULT = 8L << 20;
 
   /**
    * The threshold (larger than this value) to enable multipart upload during copying objects
    * in the given object storage. If the copied data size is less than threshold, will copy data via
    * executing copyObject instead of uploadPartCopy. E.g. fs.tos.multipart.copy-threshold
    */
-  public static final ArgumentKey MULTIPART_COPY_THRESHOLD =
+  public static final ArgumentKey FS_MULTIPART_COPY_THRESHOLD =
       new ArgumentKey("fs.%s.multipart.copy-threshold");
-  public static final long MULTIPART_COPY_THRESHOLD_DEFAULT = 5L << 20;
+  public static final long FS_MULTIPART_COPY_THRESHOLD_DEFAULT = 5L << 20;
 
   /**
    * The threshold which control whether enable multipart upload during writing data to the given
    * object storage, if the write data size is less than threshold, will write data via simple put
    * instead of multipart upload. E.g. fs.tos.multipart.threshold.
    */
-  public static final ArgumentKey MULTIPART_THRESHOLD =
+  public static final ArgumentKey FS_MULTIPART_THRESHOLD =
       new ArgumentKey("fs.%s.multipart.threshold");
-  public static final long MULTIPART_THRESHOLD_DEFAULT = 10 << 20;
+  public static final long FS_MULTIPART_THRESHOLD_DEFAULT = 10 << 20;
 
   /**
    * The max byte size which will buffer the staging data in-memory before flushing to the staging
    * file. It will decrease the random write in local staging disk dramatically if writing plenty of
    * small files.
    */
-  public static final String MULTIPART_STAGING_BUFFER_SIZE = "fs.tos.multipart.staging-buffer-size";
-  public static final int MULTIPART_STAGING_BUFFER_SIZE_DEFAULT = 4 << 10;
+  public static final ArgumentKey FS_MULTIPART_STAGING_BUFFER_SIZE =
+      new ArgumentKey("fs.%s.multipart.staging-buffer-size");
+  public static final int FS_MULTIPART_STAGING_BUFFER_SIZE_DEFAULT = 4 << 10;
 
   /**
    * The multipart upload part staging dir(s) of the given object storage.
    * e.g. fs.tos.multipart.staging-dir.
    * Separate the staging dirs with comma if there are many staging dir paths.
    */
-  public static final String MULTIPART_STAGING_DIR = "fs.tos.multipart.staging-dir";
-  public static final String MULTIPART_STAGING_DIR_DEFAULT = defaultDir("multipart-staging-dir");
-
-  /**
-   * The batch size of deleting multiple objects per request for the given object storage.
-   * e.g. fs.tos.delete.batch-size
-   */
-  public static final String BATCH_DELETE_SIZE = "fs.tos.delete.batch-size";
-  public static final int BATCH_DELETE_SIZE_DEFAULT = 250;
+  public static final ArgumentKey FS_MULTIPART_STAGING_DIR =
+      new ArgumentKey("fs.%s.multipart.staging-dir");
+  public static final String FS_MULTIPART_STAGING_DIR_DEFAULT = defaultDir("multipart-staging-dir");
 
   /**
    * True to create the missed parent dir asynchronously during deleting or renaming a file or dir.
    */
-  public static final ArgumentKey ASYNC_CREATE_MISSED_PARENT =
+  public static final ArgumentKey FS_ASYNC_CREATE_MISSED_PARENT =
       new ArgumentKey("fs.%s.missed.parent.dir.async-create");
-  public static final boolean ASYNC_CREATE_MISSED_PARENT_DEFAULT = true;
+  public static final boolean FS_ASYNC_CREATE_MISSED_PARENT_DEFAULT = true;
 
   /**
    * Whether using rename semantic of object storage during rename files, otherwise using
@@ -112,65 +106,40 @@ public class ConfKeys {
    * If you are using TOS, you have to send putBucketRename request before sending rename request,
    * otherwise MethodNotAllowed exception will be thrown.
    */
-  public static final ArgumentKey OBJECT_RENAME_ENABLED = new ArgumentKey("fs.%s.rename.enabled");
-  public static final boolean OBJECT_RENAME_ENABLED_DEFAULT = false;
+  public static final ArgumentKey FS_OBJECT_RENAME_ENABLED =
+      new ArgumentKey("fs.%s.rename.enabled");
+  public static final boolean FS_OBJECT_RENAME_ENABLED_DEFAULT = false;
 
   /**
    * The range size when open object storage input stream. Value must be positive.
    */
-  public static final String OBJECT_STREAM_RANGE_SIZE = "proton.objectstorage.stream.range-size";
-  public static final long OBJECT_STREAM_RANGE_SIZE_DEFAULT = Long.MAX_VALUE;
+  public static final String FS_OBJECT_STREAM_RANGE_SIZE = "fs.objectstorage.stream.range-size";
+  public static final long FS_OBJECT_STREAM_RANGE_SIZE_DEFAULT = Long.MAX_VALUE;
 
   /**
    * The size of thread pool used for running tasks in parallel for the given object fs,
    * e.g. delete objects, copy files. the key example: fs.tos.task.thread-pool-size.
    */
-  public static final String TASK_THREAD_POOL_SIZE = "fs.tos.task.thread-pool-size";
-  public static final int TASK_THREAD_POOL_SIZE_DEFAULT =
+  public static final ArgumentKey FS_TASK_THREAD_POOL_SIZE =
+      new ArgumentKey("fs.%s.task.thread-pool-size");
+  public static final int FS_TASK_THREAD_POOL_SIZE_DEFAULT =
       Math.max(2, Runtime.getRuntime().availableProcessors());
 
   /**
    * The size of thread pool used for uploading multipart in parallel for the given object storage,
    * e.g. fs.tos.multipart.thread-pool-size
    */
-  public static final String MULTIPART_THREAD_POOL_SIZE = "fs.tos.multipart.thread-pool-size";
-  public static final int MULTIPART_THREAD_POOL_SIZE_DEFAULT =
+  public static final ArgumentKey FS_MULTIPART_THREAD_POOL_SIZE =
+      new ArgumentKey("fs.%s.multipart.thread-pool-size");
+  public static final int FS_MULTIPART_THREAD_POOL_SIZE_DEFAULT =
       Math.max(2, Runtime.getRuntime().availableProcessors());
 
-  /**
-   * Whether enable tos getFileStatus API or not, which returns the object info directly in one RPC
-   * request, otherwise, might need to send three RPC requests to get object info.
-   * For example, there is a key 'a/b/c' exists in TOS, and we want to get object status of 'a/b',
-   * the GetFileStatus('a/b') will return the prefix 'a/b/' as a directory object directly. If this
-   * property is disabled, we need to head('a/b') at first, and then head('a/b/'), and last call
-   * list('a/b/', limit=1) to get object info. Using GetFileStatus API can reduce the RPC call
-   * times.
-   */
-  public static final String TOS_GET_FILE_STATUS_ENABLED = "fs.tos.get-file-status.enabled";
-  public static final boolean TOS_GET_FILE_STATUS_ENABLED_DEFAULT = true;
-
   /**
    * The toggle indicates whether enable checksum during getting file status for the given object.
    * E.g. fs.tos.checksum.enabled
    */
-  public static final ArgumentKey CHECKSUM_ENABLED = new ArgumentKey("fs.%s.checksum.enabled");
-  public static final boolean CHECKSUM_ENABLED_DEFAULT = true;
-
-  /**
-   * The key indicates the name of the tos checksum algorithm. Specify the algorithm name to compare
-   * checksums between different storage systems. For example to compare checksums between hdfs and
-   * tos, we need to configure the algorithm name to COMPOSITE-CRC32C.
-   */
-  public static final String TOS_CHECKSUM_ALGORITHM = "fs.tos.checksum-algorithm";
-  public static final String TOS_CHECKSUM_ALGORITHM_DEFAULT = "PROTON-CHECKSUM";
-
-  /**
-   * The key indicates how to retrieve file checksum from tos, error will be thrown if the
-   * configured checksum type is not supported by tos. The supported checksum types are:
-   * CRC32C, CRC64ECMA.
-   */
-  public static final String TOS_CHECKSUM_TYPE = "fs.tos.checksum-type";
-  public static final String TOS_CHECKSUM_TYPE_DEFAULT = ChecksumType.CRC64ECMA.name();
+  public static final ArgumentKey FS_CHECKSUM_ENABLED = new ArgumentKey("fs.%s.checksum.enabled");
+  public static final boolean FS_CHECKSUM_ENABLED_DEFAULT = true;
 
   public static String defaultDir(String basename) {
     String tmpdir = System.getProperty("java.io.tmpdir");
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/FileStoreKeys.java b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/FileStoreKeys.java
index 557ab5cbc74..3411821d568 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/FileStoreKeys.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/FileStoreKeys.java
@@ -22,17 +22,13 @@ import org.apache.hadoop.fs.tosfs.object.ChecksumType;
 
 public class FileStoreKeys {
 
-  /**
-   * File store object storage endpoint to connect to.
-   */
-  public static final String FS_FILESTORE_ENDPOINT = "fs.filestore.endpoint";
-
   /**
    * The key indicates the name of the filestore checksum algorithm. Specify the algorithm name to
    * satisfy different storage systems. For example, the hdfs style name is COMPOSITE-CRC32 and
    * COMPOSITE-CRC32C.
    */
   public static final String FS_FILESTORE_CHECKSUM_ALGORITHM = "fs.filestore.checksum-algorithm";
+  public static final String FS_FILESTORE_CHECKSUM_ALGORITHM_DEFAULT = "TOS-CHECKSUM";
 
   /**
    * The key indicates how to retrieve file checksum from filestore, error will be thrown if the
@@ -40,10 +36,4 @@ public class FileStoreKeys {
    */
   public static final String FS_FILESTORE_CHECKSUM_TYPE = "fs.filestore.checksum-type";
   public static final String FS_FILESTORE_CHECKSUM_TYPE_DEFAULT = ChecksumType.MD5.name();
-
-  /**
-   * The batch size of deleting multiple objects per request for the given object storage.
-   */
-  public static final String FS_FILESTORE_BATCH_DELETE_SIZE = "fs.filestore.delete.batch-size";
-  public static final int FS_FILESTORE_BATCH_DELETE_SIZE_DEFAULT = 250;
 }
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/TosKeys.java b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/TosKeys.java
index e44e1bb73ca..8c16306811e 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/TosKeys.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/conf/TosKeys.java
@@ -21,9 +21,6 @@ package org.apache.hadoop.fs.tosfs.conf;
 import org.apache.hadoop.fs.tosfs.object.ChecksumType;
 import org.apache.hadoop.fs.tosfs.object.tos.TOSErrorCodes;
 
-import java.util.Collections;
-import java.util.List;
-
 public class TosKeys {
 
   /**
@@ -137,12 +134,6 @@ public class TosKeys {
   public static final String FS_TOS_HTTP_DNS_CACHE_TIME_MINUTES = "fs.tos.http.dnsCacheTimeMinutes";
   public static final int FS_TOS_HTTP_DNS_CACHE_TIME_MINUTES_DEFAULT = 0;
 
-  /**
-   * True to create the missed parent dir asynchronously during deleting or renaming a file or dir.
-   */
-  public static final String FS_ASYNC_CREATE_MISSED_PARENT = "fs.tos.missed.parent.dir.async-create";
-  public static final boolean FS_ASYNC_CREATE_MISSED_PARENT_DEFAULT = false;
-
   /**
    * Used for directory bucket, whether enable recursive delete capability in TOS server, which will
    * atomic delete all objects under given dir(inclusive), otherwise the client will list all sub
@@ -253,4 +244,32 @@ public class TosKeys {
    */
   public static final String FS_TOS_CRC_CHECK_ENABLED = "fs.tos.crc.check.enable";
   public static final boolean FS_TOS_CRC_CHECK_ENABLED_DEFAULT = true;
+
+  /**
+   * Whether enable tos getFileStatus API or not, which returns the object info directly in one RPC
+   * request, otherwise, might need to send three RPC requests to get object info.
+   * For example, there is a key 'a/b/c' exists in TOS, and we want to get object status of 'a/b',
+   * the GetFileStatus('a/b') will return the prefix 'a/b/' as a directory object directly. If this
+   * property is disabled, we need to head('a/b') at first, and then head('a/b/'), and last call
+   * list('a/b/', limit=1) to get object info. Using GetFileStatus API can reduce the RPC call
+   * times.
+   */
+  public static final String FS_TOS_GET_FILE_STATUS_ENABLED = "fs.tos.get-file-status.enabled";
+  public static final boolean FS_TOS_GET_FILE_STATUS_ENABLED_DEFAULT = true;
+
+  /**
+   * The key indicates the name of the tos checksum algorithm. Specify the algorithm name to compare
+   * checksums between different storage systems. For example to compare checksums between hdfs and
+   * tos, we need to configure the algorithm name to COMPOSITE-CRC32C.
+   */
+  public static final String FS_TOS_CHECKSUM_ALGORITHM = "fs.tos.checksum-algorithm";
+  public static final String FS_TOS_CHECKSUM_ALGORITHM_DEFAULT = "TOS-CHECKSUM";
+
+  /**
+   * The key indicates how to retrieve file checksum from tos, error will be thrown if the
+   * configured checksum type is not supported by tos. The supported checksum types are:
+   * CRC32C, CRC64ECMA.
+   */
+  public static final String FS_TOS_CHECKSUM_TYPE = "fs.tos.checksum-type";
+  public static final String FS_TOS_CHECKSUM_TYPE_DEFAULT = ChecksumType.CRC64ECMA.name();
 }
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/FileStore.java b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/FileStore.java
index 568a294bf10..22dd46fab7b 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/FileStore.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/FileStore.java
@@ -22,6 +22,8 @@ import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.tosfs.conf.ConfKeys;
+import org.apache.hadoop.fs.tosfs.conf.FileStoreKeys;
 import org.apache.hadoop.fs.tosfs.object.exceptions.NotAppendableException;
 import org.apache.hadoop.fs.tosfs.object.request.ListObjectsRequest;
 import org.apache.hadoop.fs.tosfs.object.response.ListObjectsResponse;
@@ -58,13 +60,6 @@ import java.util.TreeSet;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
-import static org.apache.hadoop.fs.tosfs.conf.FileStoreKeys.FS_FILESTORE_BATCH_DELETE_SIZE;
-import static org.apache.hadoop.fs.tosfs.conf.FileStoreKeys.FS_FILESTORE_BATCH_DELETE_SIZE_DEFAULT;
-import static org.apache.hadoop.fs.tosfs.conf.FileStoreKeys.FS_FILESTORE_CHECKSUM_ALGORITHM;
-import static org.apache.hadoop.fs.tosfs.conf.FileStoreKeys.FS_FILESTORE_CHECKSUM_TYPE;
-import static org.apache.hadoop.fs.tosfs.conf.FileStoreKeys.FS_FILESTORE_CHECKSUM_TYPE_DEFAULT;
-import static org.apache.hadoop.fs.tosfs.conf.FileStoreKeys.FS_FILESTORE_ENDPOINT;
-
 public class FileStore implements ObjectStorage {
 
   private static final Logger LOG = LoggerFactory.getLogger(FileStore.class);
@@ -99,11 +94,12 @@ public class FileStore implements ObjectStorage {
   public void initialize(Configuration conf, String bucket) {
     this.bucket = bucket;
     this.conf = conf;
-    String endpoint = conf.get(FS_FILESTORE_ENDPOINT);
+    String endpoint = conf.get(ConfKeys.FS_OBJECT_STORAGE_ENDPOINT.key(NAME));
     if (endpoint == null || endpoint.isEmpty()) {
       endpoint = System.getenv(ENV_FILE_STORAGE_ROOT);
     }
-    Preconditions.checkNotNull(endpoint, "%s cannot be null", FS_FILESTORE_ENDPOINT);
+    Preconditions.checkNotNull(endpoint, "%s cannot be null",
+        ConfKeys.FS_OBJECT_STORAGE_ENDPOINT.key(NAME));
 
     if (endpoint.endsWith(SLASH)) {
       this.root = endpoint;
@@ -112,9 +108,10 @@ public class FileStore implements ObjectStorage {
     }
     LOG.debug("the root path is: {}", this.root);
 
-    String algorithm = conf.get(FS_FILESTORE_CHECKSUM_ALGORITHM);
-    ChecksumType checksumType = ChecksumType.valueOf(
-        conf.get(FS_FILESTORE_CHECKSUM_TYPE, FS_FILESTORE_CHECKSUM_TYPE_DEFAULT).toUpperCase());
+    String algorithm = conf.get(FileStoreKeys.FS_FILESTORE_CHECKSUM_ALGORITHM,
+        FileStoreKeys.FS_FILESTORE_CHECKSUM_ALGORITHM_DEFAULT);
+    ChecksumType checksumType = ChecksumType.valueOf(conf.get(FileStoreKeys.FS_FILESTORE_CHECKSUM_TYPE,
+        FileStoreKeys.FS_FILESTORE_CHECKSUM_TYPE_DEFAULT).toUpperCase());
     Preconditions.checkArgument(checksumType == ChecksumType.MD5,
         "Checksum type %s is not supported by FileStore.", 
checksumType.name());
     checksumInfo = new ChecksumInfo(algorithm, checksumType);
@@ -241,8 +238,8 @@ public class FileStore implements ObjectStorage {
   @Override
   public void deleteAll(String prefix) {
     Iterable<ObjectInfo> objects = listAll(prefix, "");
-    ObjectUtils.deleteAllObjects(this, objects,
-        conf.getInt(FS_FILESTORE_BATCH_DELETE_SIZE, FS_FILESTORE_BATCH_DELETE_SIZE_DEFAULT));
+    ObjectUtils.deleteAllObjects(this, objects, conf.getInt(ConfKeys.FS_BATCH_DELETE_SIZE.key(NAME),
+        ConfKeys.FS_BATCH_DELETE_SIZE_DEFAULT));
   }
 
   @Override
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/ObjectOutputStream.java b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/ObjectOutputStream.java
index a7510420991..d6fbdebe8f4 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/ObjectOutputStream.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/ObjectOutputStream.java
@@ -73,12 +73,12 @@ public class ObjectOutputStream extends OutputStream {
     this.destScheme = dest.toUri().getScheme();
     this.totalWroteSize = 0;
     this.destKey = createDestKey(dest);
-    this.multiUploadThreshold = conf.getLong(ConfKeys.MULTIPART_THRESHOLD.key(destScheme),
-        ConfKeys.MULTIPART_THRESHOLD_DEFAULT);
+    this.multiUploadThreshold = conf.getLong(ConfKeys.FS_MULTIPART_THRESHOLD.key(destScheme),
+        ConfKeys.FS_MULTIPART_THRESHOLD_DEFAULT);
     this.byteSizePerPart =
-        conf.getLong(ConfKeys.MULTIPART_SIZE.key(destScheme), ConfKeys.MULTIPART_SIZE_DEFAULT);
-    this.stagingBufferSize = conf.getInt(ConfKeys.MULTIPART_STAGING_BUFFER_SIZE,
-        ConfKeys.MULTIPART_STAGING_BUFFER_SIZE_DEFAULT);
+        conf.getLong(ConfKeys.FS_MULTIPART_SIZE.key(destScheme), ConfKeys.FS_MULTIPART_SIZE_DEFAULT);
+    this.stagingBufferSize = conf.getInt(ConfKeys.FS_MULTIPART_STAGING_BUFFER_SIZE.key(destScheme),
+        ConfKeys.FS_MULTIPART_STAGING_BUFFER_SIZE_DEFAULT);
     this.allowPut = allowPut;
     this.stagingDirs = createStagingDirs(conf, destScheme);
 
@@ -88,10 +88,10 @@ public class ObjectOutputStream extends OutputStream {
   }
 
   private static List<File> createStagingDirs(Configuration conf, String scheme) {
-    String[] dirs =
-        conf.getStrings(ConfKeys.MULTIPART_STAGING_DIR, ConfKeys.MULTIPART_STAGING_DIR_DEFAULT);
+    String[] dirs = conf.getStrings(ConfKeys.FS_MULTIPART_STAGING_DIR.key(scheme),
+        ConfKeys.FS_MULTIPART_STAGING_DIR_DEFAULT);
     Preconditions.checkArgument(dirs != null && dirs.length > 0, "'%s' cannot be an empty list",
-        ConfKeys.MULTIPART_STAGING_DIR);
+        ConfKeys.FS_MULTIPART_STAGING_DIR.key(scheme));
 
     List<File> stagingDirs = new ArrayList<>();
     for (String dir : dirs) {
@@ -99,14 +99,18 @@ public class ObjectOutputStream extends OutputStream {
       File stagingDir = new File(dir);
       if (!stagingDir.exists() && stagingDir.mkdirs()) {
         Preconditions.checkArgument(stagingDir.setWritable(true, false),
-            "Failed to change staging dir permission to writable, please check 
%s with value %s", ConfKeys.MULTIPART_STAGING_DIR, dir);
+            "Failed to change staging dir permission to writable, please check 
%s with value %s",
+            ConfKeys.FS_MULTIPART_STAGING_DIR.key(scheme), dir);
         Preconditions.checkArgument(stagingDir.setReadable(true, false),
-            "Failed to change staging dir permission to readable, please check 
%s with value %s", ConfKeys.MULTIPART_STAGING_DIR, dir);
+            "Failed to change staging dir permission to readable, please check 
%s with value %s",
+            ConfKeys.FS_MULTIPART_STAGING_DIR.key(scheme), dir);
       } else {
         Preconditions.checkArgument(stagingDir.exists(),
-            "Failed to create staging dir, please check %s with value %s", 
ConfKeys.MULTIPART_STAGING_DIR, dir);
+            "Failed to create staging dir, please check %s with value %s",
+            ConfKeys.FS_MULTIPART_STAGING_DIR.key(scheme), dir);
         Preconditions.checkArgument(stagingDir.isDirectory(),
-            "Staging dir should be a directory, please check %s with value 
%s", ConfKeys.MULTIPART_STAGING_DIR, dir);
+            "Staging dir should be a directory, please check %s with value %s",
+            ConfKeys.FS_MULTIPART_STAGING_DIR.key(scheme), dir);
       }
       stagingDirs.add(stagingDir);
     }
@@ -174,7 +178,7 @@ public class ObjectOutputStream extends OutputStream {
         Preconditions.checkState(byteSizePerPart >= multipartUpload.minPartSize(),
             "Configured upload part size %s must be greater than or equals to the minimal part size %s,"
                 + " please check configure key %s.", byteSizePerPart, multipartUpload.minPartSize(),
-            ConfKeys.MULTIPART_THRESHOLD.key(destScheme));
+            ConfKeys.FS_MULTIPART_THRESHOLD.key(destScheme));
 
         // Upload the accumulated staging files whose length >= byteSizePerPart.
         for (StagingPart stagingPart : stagingParts) {
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/tos/TOS.java b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/tos/TOS.java
index e83ed8a4b3b..a512e708584 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/tos/TOS.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/object/tos/TOS.java
@@ -184,9 +184,9 @@ public class TOS implements DirectoryStorage {
     defaultAcl = TypeConverter.convertACLType(conf.get(TosKeys.FS_TOS_ACL_DEFAULT));
 
     String algorithm =
-        conf.get(ConfKeys.TOS_CHECKSUM_ALGORITHM, ConfKeys.TOS_CHECKSUM_ALGORITHM_DEFAULT);
+        conf.get(TosKeys.FS_TOS_CHECKSUM_ALGORITHM, TosKeys.FS_TOS_CHECKSUM_ALGORITHM_DEFAULT);
     ChecksumType checksumType = ChecksumType.valueOf(
-        conf.get(ConfKeys.TOS_CHECKSUM_TYPE, ConfKeys.TOS_CHECKSUM_TYPE_DEFAULT).toUpperCase());
+        conf.get(TosKeys.FS_TOS_CHECKSUM_TYPE, TosKeys.FS_TOS_CHECKSUM_TYPE_DEFAULT).toUpperCase());
     Preconditions.checkArgument(CHECKSUM_HEADER.containsKey(checksumType),
         "Checksum type %s is not supported by TOS.", checksumType.name());
     checksumInfo = new ChecksumInfo(algorithm, checksumType);
@@ -1005,8 +1005,8 @@ public class TOS implements DirectoryStorage {
   public ObjectInfo objectStatus(String key) {
     if (bucket().isDirectory()) {
       return head(key);
-    } else if (conf.getBoolean(ConfKeys.TOS_GET_FILE_STATUS_ENABLED,
-        ConfKeys.TOS_GET_FILE_STATUS_ENABLED_DEFAULT)) {
+    } else if (conf.getBoolean(TosKeys.FS_TOS_GET_FILE_STATUS_ENABLED,
+        TosKeys.FS_TOS_GET_FILE_STATUS_ENABLED_DEFAULT)) {
       return getFileStatus(key);
     } else {
       ObjectInfo obj = head(key);
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/ops/DefaultFsOps.java b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/ops/DefaultFsOps.java
index 1daba434f6f..09541f9fe6e 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/ops/DefaultFsOps.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/ops/DefaultFsOps.java
@@ -63,8 +63,8 @@ public class DefaultFsOps implements FsOps {
     this.objMapper = objMapper;
     this.renameOp = new RenameOp(conf, storage, taskThreadPool);
     this.asyncCreateParentDir =
-        conf.getBoolean(ConfKeys.ASYNC_CREATE_MISSED_PARENT.key(storage.scheme()),
-            ConfKeys.ASYNC_CREATE_MISSED_PARENT_DEFAULT);
+        conf.getBoolean(ConfKeys.FS_ASYNC_CREATE_MISSED_PARENT.key(storage.scheme()),
+            ConfKeys.FS_ASYNC_CREATE_MISSED_PARENT_DEFAULT);
   }
 
   @Override
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/ops/RenameOp.java b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/ops/RenameOp.java
index b52aa88def9..f7feb85eb2d 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/ops/RenameOp.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/main/java/org/apache/hadoop/fs/tosfs/ops/RenameOp.java
@@ -56,8 +56,8 @@ public class RenameOp {
     this.conf = conf;
     this.storage = storage;
     this.renamePool = taskThreadPool;
-    this.renameObjectEnabled = conf.getBoolean(ConfKeys.OBJECT_RENAME_ENABLED.key(storage.scheme()),
-        ConfKeys.OBJECT_RENAME_ENABLED_DEFAULT);
+    this.renameObjectEnabled = conf.getBoolean(ConfKeys.FS_OBJECT_RENAME_ENABLED.key(storage.scheme()),
+        ConfKeys.FS_OBJECT_RENAME_ENABLED_DEFAULT);
   }
 
   public void renameDir(Path src, Path dst) {
@@ -155,11 +155,11 @@ public class RenameOp {
   }
 
   private void copyFile(String srcKey, String dstKey, long srcSize) throws IOException {
-    long byteSizePerPart = conf.getLong(ConfKeys.MULTIPART_SIZE.key(storage.scheme()),
-        ConfKeys.MULTIPART_SIZE_DEFAULT);
+    long byteSizePerPart = conf.getLong(ConfKeys.FS_MULTIPART_SIZE.key(storage.scheme()),
+        ConfKeys.FS_MULTIPART_SIZE_DEFAULT);
     long multiPartCopyThreshold =
-        conf.getLong(ConfKeys.MULTIPART_COPY_THRESHOLD.key(storage.scheme()),
-            ConfKeys.MULTIPART_COPY_THRESHOLD_DEFAULT);
+        conf.getLong(ConfKeys.FS_MULTIPART_COPY_THRESHOLD.key(storage.scheme()),
+            ConfKeys.FS_MULTIPART_COPY_THRESHOLD_DEFAULT);
     if (srcSize > multiPartCopyThreshold) {
       uploadPartCopy(srcKey, srcSize, dstKey, byteSizePerPart);
     } else {
@@ -173,7 +173,7 @@ public class RenameOp {
       Preconditions.checkState(byteSizePerPart >= multipartUpload.minPartSize(),
           "Configured upload part size %s must be greater than or equals to the minimal part size %s,"
               + " please check configure key %s.", byteSizePerPart, multipartUpload.minPartSize(),
-          ConfKeys.MULTIPART_SIZE.key(storage.scheme()));
+          ConfKeys.FS_MULTIPART_SIZE.key(storage.scheme()));
 
       AtomicInteger partNumGetter = new AtomicInteger(0);
       List<CompletableFuture<Part>> results = Lists.newArrayList();
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/TestTosChecksum.java b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/TestTosChecksum.java
index b5244bdd9bc..1192ca5d7ab 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/TestTosChecksum.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/TestTosChecksum.java
@@ -63,7 +63,7 @@ public class TestTosChecksum {
     Configuration fileStoreConf = new Configuration();
     fileStoreConf.set(FileStoreKeys.FS_FILESTORE_CHECKSUM_ALGORITHM, ALGORITHM_NAME);
     fileStoreConf.set(FileStoreKeys.FS_FILESTORE_CHECKSUM_TYPE, ChecksumType.MD5.name());
-    fileStoreConf.set(FileStoreKeys.FS_FILESTORE_ENDPOINT, fileStoreRoot);
+    fileStoreConf.set(ConfKeys.FS_OBJECT_STORAGE_ENDPOINT.key("filestore"), fileStoreRoot);
 
     URI uri0 = new URI("filestore://" + TestUtility.bucket() + "/");
     Object[] objs = new Object[] { ChecksumType.MD5, fileStoreConf, uri0,
@@ -72,8 +72,8 @@ public class TestTosChecksum {
 
     // The 2nd argument.
     Configuration tosConf = new Configuration();
-    tosConf.set(ConfKeys.TOS_CHECKSUM_ALGORITHM, ALGORITHM_NAME);
-    tosConf.set(ConfKeys.TOS_CHECKSUM_TYPE, ChecksumType.CRC32C.name());
+    tosConf.set(TosKeys.FS_TOS_CHECKSUM_ALGORITHM, ALGORITHM_NAME);
+    tosConf.set(TosKeys.FS_TOS_CHECKSUM_TYPE, ChecksumType.CRC32C.name());
 
     URI uri1 = new URI(TOS_SCHEME + "://" + TestUtility.bucket() + "/");
     objs = new Object[] { ChecksumType.CRC32C, tosConf, uri1,
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestChecksum.java b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestChecksum.java
index eaadacc7421..9c602bec23c 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestChecksum.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestChecksum.java
@@ -101,7 +101,7 @@ public class TestChecksum extends AbstractFSContractTestBase {
 
     // disable checksum
     Configuration newConf = new Configuration(getFileSystem().getConf());
-    newConf.setBoolean(ConfKeys.CHECKSUM_ENABLED.key("tos"), false);
+    newConf.setBoolean(ConfKeys.FS_CHECKSUM_ENABLED.key("tos"), false);
     FileSystem newFS = FileSystem.get(newConf);
 
     assertEquals(newFS.getFileChecksum(path1), newFS.getFileChecksum(path2));
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestGetFileStatus.java b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestGetFileStatus.java
index f4f7b534da5..6cfbdb5dff0 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestGetFileStatus.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestGetFileStatus.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.tosfs.RawFileStatus;
 import org.apache.hadoop.fs.tosfs.conf.ConfKeys;
+import org.apache.hadoop.fs.tosfs.conf.TosKeys;
 import org.apache.hadoop.fs.tosfs.object.Constants;
 import org.apache.hadoop.fs.tosfs.util.UUIDUtils;
 import org.junit.Assert;
@@ -58,8 +59,8 @@ public class TestGetFileStatus extends AbstractContractGetFileStatusTest {
 
   @Override
   protected AbstractFSContract createContract(Configuration conf) {
-    conf.setBoolean(ConfKeys.TOS_GET_FILE_STATUS_ENABLED, getFileStatusEnabled);
-    conf.setBoolean(ConfKeys.ASYNC_CREATE_MISSED_PARENT.key("tos"), false);
+    conf.setBoolean(TosKeys.FS_TOS_GET_FILE_STATUS_ENABLED, getFileStatusEnabled);
+    conf.setBoolean(ConfKeys.FS_ASYNC_CREATE_MISSED_PARENT.key("tos"), false);
     return new TosContract(conf);
   }
 
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestRename.java b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestRename.java
index 179619c9f36..7cd0e3e0d87 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestRename.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/contract/TestRename.java
@@ -40,9 +40,9 @@ public class TestRename extends AbstractContractRenameTest {
     // Add follow two keys into hadoop configuration.
     String defaultScheme = FileSystem.getDefaultUri(conf).getScheme();
     Configuration newConf = new Configuration(conf);
-    newConf.setLong(ConfKeys.MULTIPART_SIZE.key(defaultScheme), ConfKeys.MULTIPART_SIZE_DEFAULT);
-    newConf.setLong(ConfKeys.MULTIPART_THRESHOLD.key(defaultScheme),
-        ConfKeys.MULTIPART_THRESHOLD_DEFAULT);
+    newConf.setLong(ConfKeys.FS_MULTIPART_SIZE.key(defaultScheme), ConfKeys.FS_MULTIPART_SIZE_DEFAULT);
+    newConf.setLong(ConfKeys.FS_MULTIPART_THRESHOLD.key(defaultScheme),
+        ConfKeys.FS_MULTIPART_THRESHOLD_DEFAULT);
 
     return new TosContract(newConf);
   }
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/object/TestObjectOutputStream.java b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/object/TestObjectOutputStream.java
index 203f3c5aecc..ac450d6d958 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/object/TestObjectOutputStream.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/object/TestObjectOutputStream.java
@@ -74,7 +74,7 @@ public class TestObjectOutputStream extends ObjectStorageTestBase {
         tmpDirs.add(tmp.newDir());
       }
       Configuration newConf = new Configuration(protonConf);
-      newConf.set(ConfKeys.MULTIPART_STAGING_DIR.format("filestore"), Joiner.on(",").join(tmpDirs));
+      newConf.set(ConfKeys.FS_MULTIPART_STAGING_DIR.key("filestore"), Joiner.on(",").join(tmpDirs));
 
       // Start multiple threads to open streams to create staging dir.
       List<Future<ObjectOutputStream>> futures = Collections.synchronizedList(new ArrayList<>());
@@ -117,7 +117,7 @@ public class TestObjectOutputStream extends ObjectStorageTestBase {
   public void testDeleteStagingFileWhenUploadPartsOK() throws IOException {
     Path path = path("data.txt");
     ObjectOutputStream out = new ObjectOutputStream(storage, threadPool, protonConf, path, true);
-    byte[] data = TestUtility.rand((int) (ConfKeys.MULTIPART_SIZE_DEFAULT * 2));
+    byte[] data = TestUtility.rand((int) (ConfKeys.FS_MULTIPART_SIZE_DEFAULT * 2));
     out.write(data);
     out.waitForPartsUpload();
     for (StagingPart part : out.stagingParts()) {
@@ -133,7 +133,7 @@ public class TestObjectOutputStream extends ObjectStorageTestBase {
   public void testDeleteStagingFileWithClose() throws IOException {
     Path path = path("data.txt");
     ObjectOutputStream out = new ObjectOutputStream(storage, threadPool, protonConf, path, true);
-    byte[] data = TestUtility.rand((int) (ConfKeys.MULTIPART_SIZE_DEFAULT * 2));
+    byte[] data = TestUtility.rand((int) (ConfKeys.FS_MULTIPART_SIZE_DEFAULT * 2));
     out.write(data);
     out.close();
     for (StagingPart part : out.stagingParts()) {
@@ -172,7 +172,7 @@ public class TestObjectOutputStream extends ObjectStorageTestBase {
 
   public void testWrite(int uploadPartSize, int len) throws IOException {
     Configuration newConf = new Configuration(protonConf);
-    newConf.setLong(ConfKeys.MULTIPART_SIZE.key(FSUtils.scheme(conf, testDir.toUri())),
+    newConf.setLong(ConfKeys.FS_MULTIPART_SIZE.key(FSUtils.scheme(conf, testDir.toUri())),
         uploadPartSize);
 
     Path outPath = path(len + ".txt");
@@ -208,7 +208,7 @@ public class TestObjectOutputStream extends ObjectStorageTestBase {
   public void testParallelWriteOneOutPutStreamImpl(int partSize, int epochs, int batchSize)
       throws IOException, ExecutionException, InterruptedException {
     Configuration newConf = new Configuration(protonConf);
-    newConf.setLong(ConfKeys.MULTIPART_SIZE.key(FSUtils.scheme(conf, testDir.toUri())),
+    newConf.setLong(ConfKeys.FS_MULTIPART_SIZE.key(FSUtils.scheme(conf, testDir.toUri())),
         partSize);
 
     String file = String.format("%d-%d-%d-testParallelWriteOneOutPutStream.txt", partSize, epochs, batchSize);
@@ -284,8 +284,8 @@ public class TestObjectOutputStream extends ObjectStorageTestBase {
 
   private void testMultipartThreshold(int partSize, int multipartThreshold, int dataSize) throws IOException {
     Configuration newConf = new Configuration(protonConf);
-    newConf.setLong(ConfKeys.MULTIPART_SIZE.key(scheme), partSize);
-    newConf.setLong(ConfKeys.MULTIPART_THRESHOLD.key(scheme), multipartThreshold);
+    newConf.setLong(ConfKeys.FS_MULTIPART_SIZE.key(scheme), partSize);
+    newConf.setLong(ConfKeys.FS_MULTIPART_THRESHOLD.key(scheme), multipartThreshold);
     Path outPath = path(String.format("threshold-%d-%d-%d.txt", partSize, multipartThreshold, dataSize));
 
     byte[] data = TestUtility.rand(dataSize);
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/object/tos/TestTOSObjectStorage.java b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/object/tos/TestTOSObjectStorage.java
index d0f52ef5902..98fbeb727d8 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/object/tos/TestTOSObjectStorage.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/object/tos/TestTOSObjectStorage.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.fs.tosfs.object.tos;
 import com.volcengine.tos.internal.model.CRC64Checksum;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.tosfs.common.Bytes;
-import org.apache.hadoop.fs.tosfs.conf.ConfKeys;
 import org.apache.hadoop.fs.tosfs.conf.TosKeys;
 import org.apache.hadoop.fs.tosfs.object.ChecksumType;
 import org.apache.hadoop.fs.tosfs.object.Constants;
@@ -72,13 +71,13 @@ public class TestTOSObjectStorage {
     List<Object[]> values = new ArrayList<>();
 
     Configuration conf = new Configuration();
-    conf.set(ConfKeys.TOS_CHECKSUM_TYPE, ChecksumType.CRC64ECMA.name());
+    conf.set(TosKeys.FS_TOS_CHECKSUM_TYPE, ChecksumType.CRC64ECMA.name());
     values.add(new Object[] {
         ObjectStorageFactory.createWithPrefix(String.format("tos-%s/", UUIDUtils.random()),
             TOS_SCHEME, TestUtility.bucket(), conf), new CRC64Checksum(), ChecksumType.CRC64ECMA });
 
     conf = new Configuration();
-    conf.set(ConfKeys.TOS_CHECKSUM_TYPE, ChecksumType.CRC32C.name());
+    conf.set(TosKeys.FS_TOS_CHECKSUM_TYPE, ChecksumType.CRC32C.name());
     values.add(new Object[] {
         ObjectStorageFactory.createWithPrefix(String.format("tos-%s/", UUIDUtils.random()),
             TOS_SCHEME, TestUtility.bucket(), conf), new PureJavaCrc32C(), ChecksumType.CRC32C });
@@ -113,7 +112,7 @@ public class TestTOSObjectStorage {
     Assume.assumeFalse(tos.bucket().isDirectory());
 
     Configuration conf = new Configuration(tos.conf());
-    conf.setBoolean(ConfKeys.TOS_GET_FILE_STATUS_ENABLED, true);
+    conf.setBoolean(TosKeys.FS_TOS_GET_FILE_STATUS_ENABLED, true);
     tos.initialize(conf, tos.bucket().name());
 
     String key = "testFileStatus";
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/ops/TestDefaultFsOps.java b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/ops/TestDefaultFsOps.java
index 569c1012433..23d3cde0bef 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/ops/TestDefaultFsOps.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/ops/TestDefaultFsOps.java
@@ -50,12 +50,12 @@ public class TestDefaultFsOps extends TestBaseFsOps {
   @Parameterized.Parameters(name = "conf = {0}")
   public static List<Configuration> createConf() {
     Configuration directRenameConf = new Configuration();
-    directRenameConf.setBoolean(ConfKeys.OBJECT_RENAME_ENABLED.key("tos"), true);
-    directRenameConf.setBoolean(ConfKeys.ASYNC_CREATE_MISSED_PARENT.key("tos"), false);
+    directRenameConf.setBoolean(ConfKeys.FS_OBJECT_RENAME_ENABLED.key("tos"), true);
+    directRenameConf.setBoolean(ConfKeys.FS_ASYNC_CREATE_MISSED_PARENT.key("tos"), false);
 
     Configuration copiedRenameConf = new Configuration();
-    copiedRenameConf.setLong(ConfKeys.MULTIPART_COPY_THRESHOLD.key("tos"), 1L << 20);
-    copiedRenameConf.setBoolean(ConfKeys.ASYNC_CREATE_MISSED_PARENT.key("tos"), false);
+    copiedRenameConf.setLong(ConfKeys.FS_MULTIPART_COPY_THRESHOLD.key("tos"), 1L << 20);
+    copiedRenameConf.setBoolean(ConfKeys.FS_ASYNC_CREATE_MISSED_PARENT.key("tos"), false);
     return Lists.newArrayList(directRenameConf, copiedRenameConf);
   }
 
diff --git a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/ops/TestRenameOp.java b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/ops/TestRenameOp.java
index f9ea7e624dd..ccbc1a37bf1 100644
--- a/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/ops/TestRenameOp.java
+++ b/hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/ops/TestRenameOp.java
@@ -79,7 +79,7 @@ public class TestRenameOp extends TestBaseOps {
   @Test
   public void testRenameFileDirectly() throws IOException {
     Configuration conf = new Configuration();
-    conf.setLong(ConfKeys.MULTIPART_COPY_THRESHOLD.key(storage.scheme()), 1L << 20);
+    conf.setLong(ConfKeys.FS_MULTIPART_COPY_THRESHOLD.key(storage.scheme()), 1L << 20);
     operation = new ExtendedRenameOp(conf, storage, renamePool);
 
     Path renameSrc = path("renameSrc");
@@ -111,7 +111,7 @@ public class TestRenameOp extends TestBaseOps {
   public void testRenameFileByUploadParts() throws IOException {
     Assume.assumeFalse(storage.bucket().isDirectory());
     Configuration conf = new Configuration();
-    conf.setLong(ConfKeys.MULTIPART_COPY_THRESHOLD.key(storage.scheme()), 1L << 20);
+    conf.setLong(ConfKeys.FS_MULTIPART_COPY_THRESHOLD.key(storage.scheme()), 1L << 20);
     operation = new ExtendedRenameOp(conf, storage, renamePool);
 
     Path renameSrc = path("renameSrc");
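
For downstream users of this branch, a minimal migration sketch (hypothetical class and values; "tos" stands in for the concrete filesystem scheme, and the settings shown are the defaults declared in the keys above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.tosfs.conf.ConfKeys;
import org.apache.hadoop.fs.tosfs.conf.TosKeys;

public class TosConfMigrationExample {
  public static Configuration newTosConf() {
    Configuration conf = new Configuration();
    // Scheme-parameterized keys resolve to e.g. "fs.tos.multipart.size".
    conf.setLong(ConfKeys.FS_MULTIPART_SIZE.key("tos"), 8L << 20);   // was MULTIPART_SIZE
    conf.setBoolean(ConfKeys.FS_CHECKSUM_ENABLED.key("tos"), true);  // was CHECKSUM_ENABLED
    // TOS-only checksum keys moved from ConfKeys into TosKeys.
    conf.set(TosKeys.FS_TOS_CHECKSUM_TYPE, "CRC64ECMA");             // was ConfKeys.TOS_CHECKSUM_TYPE
    return conf;
  }
}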


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

