This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-refactor_property
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-refactor_property by 
this push:
     new 303c11bb378 [Feat](Catalog)Add Integration and Unit Tests for HMS and 
Fix Configuration File Issues (#48086)
303c11bb378 is described below

commit 303c11bb3781333293a48887848dcf3899306b83
Author: Calvin Kirs <[email protected]>
AuthorDate: Tue Feb 25 11:36:53 2025 +0800

    [Feat](Catalog)Add Integration and Unit Tests for HMS and Fix Configuration 
File Issues (#48086)
    
    ### Purpose:
    This PR adds integration tests and unit tests (UT) for the Hive
    Metastore Service (HMS) while fixing several critical bugs, including:
    
    ### Parameter issues
    - Fixed configuration file loading errors
    ### Changes:
    Integration and Unit Tests:
    
    - Added integration tests for HMS to ensure proper interaction with
    other system components.
    - Developed unit tests for key HMS modules, covering core business logic
    to ensure correctness.
    ### Check List (For Author)
    
    - Test <!-- At least one of them must be included. -->
        - [ ] Regression test
        - [ ] Unit Test
        - [ ] Manual test (add detailed scripts or steps below)
        - [ ] No need to test or manual test. Explain why:
    - [ ] This is a refactor/code format and no logic has been changed.
            - [ ] Previous test can cover this change.
            - [ ] No code files have been changed.
            - [ ] Other reason <!-- Add your reason?  -->
    
    - Behavior changed:
        - [ ] No.
        - [ ] Yes. <!-- Explain the behavior change -->
    
    - Does this need documentation?
        - [ ] No.
    - [ ] Yes. <!-- Add document PR link here. eg:
    https://github.com/apache/doris-website/pull/1214 -->
    
    ### Check List (For Reviewer who merge this PR)
    
    - [ ] Confirm the release note
    - [ ] Confirm test cases
    - [ ] Confirm document
    - [ ] Add branch pick label <!-- Add branch pick label that this PR
    should merge into -->
    
    ---------
    
    Co-authored-by: Mingyu Chen (Rayner) <[email protected]>
---
 fe/fe-common/pom.xml                               |   5 +
 .../doris/common/CatalogConfigFileUtils.java       |  91 +++++++++
 .../apache/doris/common/ConfigurationUtils.java    |  68 -------
 .../datasource/property/ConnectionProperties.java  |   4 +-
 .../connection/PaimonConnectionProperties.java     |   3 +-
 .../property/metastore/HMSProperties.java          |  58 ++++--
 .../storage/AbstractObjectStorageProperties.java   | 129 ++++++++++++
 .../datasource/property/storage/COSProperties.java |  79 ++++++++
 .../property/storage/HDFSProperties.java           |  13 +-
 .../datasource/property/storage/OBSProperties.java |  71 +++++++
 .../datasource/property/storage/OSSProperties.java |  88 ++++++++
 .../property/storage/ObjectStorageProperties.java  |  49 +++++
 .../datasource/property/storage/S3Properties.java  |  41 ++--
 .../property/storage/StorageProperties.java        |  18 ++
 .../property/metastore/HMSIntegrationTest.java     | 222 +++++++++++++++++++++
 .../property/metastore/HMSPropertiesTest.java      |  98 +++++++++
 .../property/storage/COSPropertiesTest.java        | 138 +++++++++++++
 .../property/storage/HDFSPropertiesTest.java       | 101 +++++++---
 .../property/storage/OBSPropertyTest.java          | 126 ++++++++++++
 .../property/storage/OSSPropertiesTest.java        | 120 +++++++++++
 .../property/storage/S3PropertiesTest.java         | 134 +++++++++++++
 .../plugins/hadoop_conf/hadoop1/core-site.xml      |  22 ++
 .../plugins/hadoop_conf/hadoop1/hdfs-site.xml      |  32 +++
 .../plugins/hive-conf/hive1/hive-site.xml          |   8 +
 24 files changed, 1587 insertions(+), 131 deletions(-)

diff --git a/fe/fe-common/pom.xml b/fe/fe-common/pom.xml
index 55934fd4e24..e124770aa90 100644
--- a/fe/fe-common/pom.xml
+++ b/fe/fe-common/pom.xml
@@ -64,6 +64,11 @@ under the License.
             <groupId>org.apache.httpcomponents</groupId>
             <artifactId>httpcore</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.apache.doris</groupId>
+            <artifactId>hive-catalog-shade</artifactId>
+            <scope>provided</scope>
+        </dependency>
         <dependency>
             <groupId>org.roaringbitmap</groupId>
             <artifactId>RoaringBitmap</artifactId>
diff --git 
a/fe/fe-common/src/main/java/org/apache/doris/common/CatalogConfigFileUtils.java
 
b/fe/fe-common/src/main/java/org/apache/doris/common/CatalogConfigFileUtils.java
new file mode 100644
index 00000000000..e6fe4c7f10c
--- /dev/null
+++ 
b/fe/fe-common/src/main/java/org/apache/doris/common/CatalogConfigFileUtils.java
@@ -0,0 +1,91 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.doris.common;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;

import java.io.File;
import java.util.function.BiConsumer;
import java.util.function.Supplier;

/**
 * Utility for loading catalog-related configuration files (e.g. core-site.xml,
 * hdfs-site.xml, hive-site.xml) from a configured directory into a Hadoop
 * {@link Configuration} or a {@link HiveConf}.
 */
public class CatalogConfigFileUtils {

    private CatalogConfigFileUtils() {
        // Static utility class; not instantiable.
    }

    /**
     * Loads configuration files from the specified directory into a configuration object.
     *
     * @param resourcesPath     comma-separated list of configuration file names to load;
     *                          must not be null or blank.
     * @param configDir         base directory where the configuration files are located.
     * @param confSupplier      factory that creates the empty configuration object to populate.
     *                          Having the caller supply the concrete type removes the previous
     *                          directory-equality check and unchecked cast, which could throw
     *                          ClassCastException at runtime when the types disagreed.
     * @param addResourceMethod method reference used to add each resource file to the configuration.
     * @param <T>               configuration type ({@link Configuration} or {@link HiveConf}).
     * @return the populated configuration object.
     * @throws IllegalArgumentException if {@code resourcesPath} is blank, or if any listed
     *                                  file does not exist or is not a regular file.
     */
    private static <T> T loadConfigFromDir(String resourcesPath, String configDir,
                                           Supplier<T> confSupplier,
                                           BiConsumer<T, Path> addResourceMethod) {
        if (StringUtils.isBlank(resourcesPath)) {
            throw new IllegalArgumentException("Config resource path is empty");
        }

        T conf = confSupplier.get();

        for (String resource : resourcesPath.split(",")) {
            String resourcePath = configDir + File.separator + resource.trim();
            File file = new File(resourcePath);
            if (file.exists() && file.isFile()) {
                addResourceMethod.accept(conf, new Path(file.toURI()));
            } else {
                throw new IllegalArgumentException("Config resource file does not exist: " + resourcePath);
            }
        }
        return conf;
    }

    /**
     * Loads the given Hadoop configuration files from {@code Config.hadoop_config_dir}.
     *
     * @param resourcesPath comma-separated list of Hadoop configuration file names to load.
     * @return a Hadoop {@link Configuration} populated only with the listed files.
     * @throws IllegalArgumentException if {@code resourcesPath} is blank, or if any listed
     *                                  file does not exist or is not a regular file.
     */
    public static Configuration loadConfigurationFromHadoopConfDir(String resourcesPath) {
        // 'false': do not load Hadoop's default resources, only the listed files.
        return loadConfigFromDir(resourcesPath, Config.hadoop_config_dir,
                () -> new Configuration(false), Configuration::addResource);
    }

    /**
     * Loads the given Hive configuration files into a {@link HiveConf}.
     *
     * @param resourcesPath comma-separated list of Hive configuration file names to load.
     * @return the {@link HiveConf} populated with the listed files.
     * @throws IllegalArgumentException if {@code resourcesPath} is blank, or if any listed
     *                                  file does not exist or is not a regular file.
     */
    public static HiveConf loadHiveConfFromHiveConfDir(String resourcesPath) {
        // NOTE(review): despite the method name, this resolves files against
        // Config.hadoop_config_dir, the same directory as the Hadoop variant.
        // The test resources in this change ship a separate 'hive-conf' directory,
        // so a dedicated Hive config dir is likely intended — confirm before merging.
        return loadConfigFromDir(resourcesPath, Config.hadoop_config_dir,
                HiveConf::new, HiveConf::addResource);
    }
}
diff --git 
a/fe/fe-common/src/main/java/org/apache/doris/common/ConfigurationUtils.java 
b/fe/fe-common/src/main/java/org/apache/doris/common/ConfigurationUtils.java
deleted file mode 100644
index d1c73bf1a2b..00000000000
--- a/fe/fe-common/src/main/java/org/apache/doris/common/ConfigurationUtils.java
+++ /dev/null
@@ -1,68 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.doris.common;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-
-import java.io.File;
-
-public class ConfigurationUtils {
-
-    /**
-     * Loads the Hadoop configuration files from the specified directory.
-     * <p>
-     * This method reads a comma-separated list of resource files from the 
given
-     * `resourcesPath`, constructs their absolute paths based on the 
`Config.external_catalog_config_dir`,
-     * and then loads these files into a Hadoop `Configuration` object.
-     *
-     * @param resourcesPath The comma-separated list of Hadoop configuration 
resource files to load.
-     *                      This must not be null or empty.
-     * @return The Hadoop `Configuration` object with the loaded configuration 
files.
-     * @throws IllegalArgumentException If the provided `resourcesPath` is 
blank, or if any of the specified
-     *                                  configuration files do not exist or 
are not regular files.
-     */
-    public static Configuration loadConfigurationFromHadoopConfDir(String 
resourcesPath) {
-        // Check if the provided resourcesPath is blank and throw an exception 
if so.
-        if (StringUtils.isBlank(resourcesPath)) {
-            throw new IllegalArgumentException("Hadoop config resource path is 
empty");
-        }
-
-        // Create a new Hadoop Configuration object without loading default 
resources.
-        Configuration conf = new Configuration(false);
-
-        // Iterate over the comma-separated list of resource files.
-        for (String resource : resourcesPath.split(",")) {
-            // Construct the full path to the resource file.
-            String resourcePath = Config.hadoop_config_dir + File.separator + 
resource.trim();
-            File file = new File(resourcePath);
-
-            // Check if the file exists and is a regular file; if not, throw 
an exception.
-            if (file.exists() && file.isFile()) {
-                // Add the resource file to the Hadoop Configuration object.
-                conf.addResource(new Path(file.toURI()));
-            } else {
-                // Throw an exception if the file does not exist or is not a 
regular file.
-                throw new IllegalArgumentException("Hadoop config resource 
file does not exist: " + resourcePath);
-            }
-        }
-        // Return the populated Hadoop Configuration object.
-        return conf;
-    }
-}
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/ConnectionProperties.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/ConnectionProperties.java
index 84a66fb0838..2722e0d7c05 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/ConnectionProperties.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/ConnectionProperties.java
@@ -17,7 +17,7 @@
 
 package org.apache.doris.datasource.property;
 
-import org.apache.doris.common.ConfigurationUtils;
+import org.apache.doris.common.CatalogConfigFileUtils;
 
 import com.google.common.base.Strings;
 import com.google.common.collect.Maps;
@@ -67,7 +67,7 @@ public class ConnectionProperties {
         if (Strings.isNullOrEmpty(origProps.get(resourceConfig))) {
             return Maps.newHashMap();
         }
-        Configuration conf = 
ConfigurationUtils.loadConfigurationFromHadoopConfDir(origProps.get(resourceConfig));
+        Configuration conf = 
CatalogConfigFileUtils.loadConfigurationFromHadoopConfDir(origProps.get(resourceConfig));
         Map<String, String> confMap = Maps.newHashMap();
         for (Map.Entry<String, String> entry : conf) {
             confMap.put(entry.getKey(), entry.getValue());
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/connection/PaimonConnectionProperties.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/connection/PaimonConnectionProperties.java
index 7d4eaf842d9..b9d429f8a1a 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/connection/PaimonConnectionProperties.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/connection/PaimonConnectionProperties.java
@@ -65,7 +65,8 @@ public class PaimonConnectionProperties {
             case HMS:
                 options.set("metastore", "hive");
                 HMSProperties hmsProperties = (HMSProperties) metaProps;
-                hmsProperties.toPaimonOptionsAndConf(options, hadoopConf);
+                // TODO we need add all metastore parameters to paimon options?
+                hmsProperties.toPaimonOptionsAndConf(options);
                 break;
             case DLF:
                 options.set("metastore", "hive");
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/HMSProperties.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/HMSProperties.java
index d304d7e7319..56e20637bf9 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/HMSProperties.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/HMSProperties.java
@@ -17,18 +17,23 @@
 
 package org.apache.doris.datasource.property.metastore;
 
+import org.apache.doris.common.CatalogConfigFileUtils;
 import org.apache.doris.datasource.property.ConnectorProperty;
 
 import com.google.common.base.Strings;
+import com.google.common.collect.Maps;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.paimon.options.Options;
 
 import java.util.Map;
 
 @Slf4j
 public class HMSProperties extends MetastoreProperties {
-    @ConnectorProperty(names = {"hive.metastore.uri"},
+
+    private static final String HIVE_METASTORE_URLS_KEY = 
"hive.metastore.uris";
+    @ConnectorProperty(names = {"hive.metastore.uris"},
             description = "The uri of the hive metastore.")
     private String hiveMetastoreUri = "";
 
@@ -37,6 +42,11 @@ public class HMSProperties extends MetastoreProperties {
             description = "The authentication type of the hive metastore.")
     private String hiveMetastoreAuthenticationType = "none";
 
+    @ConnectorProperty(names = {"hive.conf.resources"},
+            required = false,
+            description = "The conf resources of the hive metastore.")
+    private String hiveConfResourcesConfig = "";
+
     @ConnectorProperty(names = {"hive.metastore.service.principal"},
             required = false,
             description = "The service principal of the hive metastore.")
@@ -58,12 +68,16 @@ public class HMSProperties extends MetastoreProperties {
 
     @Override
     protected String getResourceConfigPropName() {
-        return "hive.resource_config";
+        return "hive.conf.resources";
     }
 
     @Override
     protected void checkRequiredProperties() {
+        //fixme need consider load from default config
         super.checkRequiredProperties();
+        if (!Strings.isNullOrEmpty(hiveConfResourcesConfig)) {
+            checkHiveConfResourcesConfig();
+        }
         if ("kerberos".equalsIgnoreCase(hiveMetastoreAuthenticationType)) {
             if (Strings.isNullOrEmpty(hiveMetastoreServicePrincipal)
                     || Strings.isNullOrEmpty(hiveMetastoreClientPrincipal)
@@ -72,17 +86,24 @@ public class HMSProperties extends MetastoreProperties {
                         + "but service principal, client principal or client 
keytab is not set.");
             }
         }
+        if (Strings.isNullOrEmpty(hiveMetastoreUri)) {
+            throw new IllegalArgumentException("Hive metastore uri is 
required.");
+        }
+    }
+
+    private void checkHiveConfResourcesConfig() {
+        loadConfigFromFile(getResourceConfigPropName());
     }
 
     public void toPaimonOptionsAndConf(Options options, Configuration conf) {
         options.set("uri", hiveMetastoreUri);
         Map<String, String> allProps = 
loadConfigFromFile(getResourceConfigPropName());
-        allProps.forEach(conf::set);
-        conf.set("hive.metastore.authentication.type", 
hiveMetastoreAuthenticationType);
+        allProps.forEach(options::set);
+        allProps.put("hive.metastore.authentication.type", 
hiveMetastoreAuthenticationType);
         if ("kerberos".equalsIgnoreCase(hiveMetastoreAuthenticationType)) {
-            conf.set("hive.metastore.service.principal", 
hiveMetastoreServicePrincipal);
-            conf.set("hive.metastore.client.principal", 
hiveMetastoreClientPrincipal);
-            conf.set("hive.metastore.client.keytab", 
hiveMetastoreClientKeytab);
+            allProps.put("hive.metastore.service.principal", 
hiveMetastoreServicePrincipal);
+            allProps.put("hive.metastore.client.principal", 
hiveMetastoreClientPrincipal);
+            allProps.put("hive.metastore.client.keytab", 
hiveMetastoreClientKeytab);
         }
     }
 
@@ -90,11 +111,24 @@ public class HMSProperties extends MetastoreProperties {
         catalogProps.put("uri", hiveMetastoreUri);
         Map<String, String> allProps = 
loadConfigFromFile(getResourceConfigPropName());
         allProps.forEach(catalogProps::put);
-        catalogProps.put("hive.metastore.authentication.type", 
hiveMetastoreAuthenticationType);
-        if ("catalogProps".equalsIgnoreCase(hiveMetastoreAuthenticationType)) {
-            catalogProps.put("hive.metastore.service.principal", 
hiveMetastoreServicePrincipal);
-            catalogProps.put("hive.metastore.client.principal", 
hiveMetastoreClientPrincipal);
-            catalogProps.put("hive.metastore.client.keytab", 
hiveMetastoreClientKeytab);
+        allProps.put("hive.metastore.authentication.type", 
hiveMetastoreAuthenticationType);
+        if ("kerberos".equalsIgnoreCase(hiveMetastoreAuthenticationType)) {
+            allProps.put("hive.metastore.service.principal", 
hiveMetastoreServicePrincipal);
+            allProps.put("hive.metastore.client.principal", 
hiveMetastoreClientPrincipal);
+            allProps.put("hive.metastore.client.keytab", 
hiveMetastoreClientKeytab);
+        }
+    }
+
+    protected Map<String, String> loadConfigFromFile(String resourceConfig) {
+        if (Strings.isNullOrEmpty(origProps.get(resourceConfig))) {
+            return Maps.newHashMap();
+        }
+        HiveConf conf = 
CatalogConfigFileUtils.loadHiveConfFromHiveConfDir(origProps.get(resourceConfig));
+        Map<String, String> confMap = Maps.newHashMap();
+        for (Map.Entry<String, String> entry : conf) {
+            confMap.put(entry.getKey(), entry.getValue());
         }
+        return confMap;
     }
+
 }
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AbstractObjectStorageProperties.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AbstractObjectStorageProperties.java
new file mode 100644
index 00000000000..c5a92be9bf6
--- /dev/null
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AbstractObjectStorageProperties.java
@@ -0,0 +1,129 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.doris.datasource.property.storage;

import org.apache.doris.datasource.property.ConnectorProperty;

import lombok.Getter;
import lombok.Setter;

import java.util.HashMap;
import java.util.Map;

/**
 * Abstract base class for object storage system properties. This class provides common configuration
 * settings for object storage systems and supports conversion of these properties into configuration
 * maps for different protocols, such as AWS S3. All object storage systems should extend this class
 * to inherit the common configuration properties and methods.
 * <p>
 * The properties include connection settings (timeouts and maximum connections) and a flag to
 * determine if path-style URLs should be used for the storage system.
 */
public abstract class AbstractObjectStorageProperties extends StorageProperties implements ObjectStorageProperties {

    /**
     * The maximum number of concurrent connections that can be made to the object storage system.
     * Optional; user-configurable.
     */
    @Getter
    @ConnectorProperty(names = {"maxConnections"}, required = false, description = "Maximum number of connections.")
    protected int maxConnections = 100;

    /**
     * The timeout (in milliseconds) for requests made to the object storage system.
     * Optional; user-configurable.
     * NOTE(review): the property name's trailing "S" suggests seconds, but the default (10000)
     * and the mapping to AWS_REQUEST_TIMEOUT_MS below show the value is used as milliseconds.
     */
    @Getter
    @ConnectorProperty(names = {"requestTimeoutS"}, required = false,
            description = "Request timeout in milliseconds.")
    protected int requestTimeoutS = 10000;

    /**
     * The timeout (in milliseconds) for establishing a connection to the object storage system.
     * Optional; user-configurable.
     * NOTE(review): same seconds-vs-milliseconds naming mismatch as requestTimeoutS.
     */
    @Getter
    @ConnectorProperty(names = {"connectionTimeoutS"}, required = false,
            description = "Connection timeout in milliseconds.")
    protected int connectionTimeoutS = 10000;

    /**
     * Flag indicating whether to use path-style URLs for the object storage system.
     * Optional; user-configurable.
     */
    @Setter
    @Getter
    @ConnectorProperty(names = {"usePathStyle"}, required = false,
            description = "Whether to use path style URL for the storage.")
    protected boolean usePathStyle = false;

    /**
     * Constructor to initialize the object storage properties with the provided type
     * and original properties map.
     *
     * @param type      the type of object storage system.
     * @param origProps the original properties map.
     */
    protected AbstractObjectStorageProperties(Type type, Map<String, String> origProps) {
        super(type, origProps);
    }

    /**
     * Generates a map of configuration properties for AWS S3 based on the provided values.
     * The map includes AWS-specific settings: endpoint, region, access keys, and timeouts.
     *
     * @param endpoint           the AWS endpoint URL.
     * @param region             the AWS region.
     * @param accessKey          the AWS access key.
     * @param secretKey          the AWS secret key.
     * @param maxConnections     the maximum number of connections.
     * @param requestTimeoutMs   the request timeout in milliseconds.
     * @param connectionTimeoutMs the connection timeout in milliseconds.
     * @param usePathStyle       flag indicating if path-style URLs should be used.
     * @return a map containing AWS S3-specific configuration properties.
     */
    protected Map<String, String> generateAWSS3Properties(String endpoint, String region, String accessKey,
                                                          String secretKey, String maxConnections,
                                                          String requestTimeoutMs,
                                                          String connectionTimeoutMs, String usePathStyle) {
        Map<String, String> s3Props = new HashMap<>();
        s3Props.put("AWS_ENDPOINT", endpoint);
        s3Props.put("AWS_REGION", region);
        s3Props.put("AWS_ACCESS_KEY", accessKey);
        s3Props.put("AWS_SECRET_KEY", secretKey);
        s3Props.put("AWS_MAX_CONNECTIONS", maxConnections);
        s3Props.put("AWS_REQUEST_TIMEOUT_MS", requestTimeoutMs);
        s3Props.put("AWS_CONNECTION_TIMEOUT_MS", connectionTimeoutMs);
        s3Props.put("use_path_style", usePathStyle);
        return s3Props;
    }

    /**
     * Generates a map of configuration properties for AWS S3 using the values configured
     * on this instance for connection settings and the path-style URL flag.
     *
     * @param endpoint  the AWS endpoint URL.
     * @param region    the AWS region.
     * @param accessKey the AWS access key.
     * @param secretKey the AWS secret key.
     * @return a map containing AWS S3-specific configuration properties.
     */
    protected Map<String, String> generateAWSS3Properties(String endpoint, String region,
                                                          String accessKey, String secretKey) {
        return generateAWSS3Properties(endpoint, region, accessKey, secretKey,
                String.valueOf(getMaxConnections()), String.valueOf(getRequestTimeoutS()),
                String.valueOf(getConnectionTimeoutS()), String.valueOf(isUsePathStyle()));
    }
}
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/COSProperties.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/COSProperties.java
new file mode 100644
index 00000000000..2db0736cb07
--- /dev/null
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/COSProperties.java
@@ -0,0 +1,79 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.doris.datasource.property.storage;

import org.apache.doris.datasource.property.ConnectorProperty;

import com.google.common.base.Strings;

import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Storage properties for Tencent Cloud COS (Cloud Object Storage).
 * Supports export to both Hadoop (cosn) and native S3-compatible configuration maps.
 */
public class COSProperties extends AbstractObjectStorageProperties {

    /** Matches the region segment of a COS endpoint, e.g. "ap-guangzhou" in "cos.ap-guangzhou.myqcloud.com". */
    private static final Pattern COS_ENDPOINT_PATTERN =
            Pattern.compile("cos\\.([a-z0-9-]+)\\.myqcloud\\.com");

    @ConnectorProperty(names = {"cos.endpoint"},
            required = false,
            description = "The endpoint of COS.")
    protected String cosEndpoint = "cos.ap-guangzhou.myqcloud.com";

    @ConnectorProperty(names = {"cos.region"},
            required = false,
            description = "The region of COS.")
    protected String cosRegion = "";

    @ConnectorProperty(names = {"cos.access_key"},
            description = "The access key of COS.")
    protected String cosAccessKey = "";

    @ConnectorProperty(names = {"cos.secret_key"},
            description = "The secret key of COS.")
    protected String cosSecretKey = "";

    protected COSProperties(Map<String, String> origProps) {
        super(Type.COS, origProps);
    }

    @Override
    public void toHadoopConfiguration(Map<String, String> config) {
        config.put("fs.cosn.bucket.region", getRegion());
        // NOTE(review): this key does not follow the "fs.cosn." prefix used by the
        // other entries — confirm the Hadoop-COS connector actually reads
        // "fs.cos.endpoint" rather than a cosn-prefixed endpoint property.
        config.put("fs.cos.endpoint", cosEndpoint);
        config.put("fs.cosn.userinfo.secretId", cosAccessKey);
        config.put("fs.cosn.userinfo.secretKey", cosSecretKey);
        config.put("fs.cosn.impl", "org.apache.hadoop.fs.CosFileSystem");
    }

    @Override
    public void toNativeS3Configuration(Map<String, String> config) {
        config.putAll(generateAWSS3Properties(cosEndpoint, getRegion(), cosAccessKey, cosSecretKey));
    }

    /**
     * Returns the COS region, deriving (and caching) it from the endpoint host name
     * when no explicit region was configured. May return "" if the endpoint does not
     * match the standard "cos.<region>.myqcloud.com" form.
     */
    private String getRegion() {
        if (Strings.isNullOrEmpty(this.cosRegion) && cosEndpoint.contains("myqcloud.com")) {
            Matcher matcher = COS_ENDPOINT_PATTERN.matcher(cosEndpoint);
            if (matcher.find()) {
                // Cache the parsed region so later calls skip the regex.
                this.cosRegion = matcher.group(1);
            }
        }
        return this.cosRegion;
    }
}
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HDFSProperties.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HDFSProperties.java
index fd8bacbf09c..f4c684183ca 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HDFSProperties.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HDFSProperties.java
@@ -63,11 +63,12 @@ public class HDFSProperties extends StorageProperties {
 
    @Override
    protected String getResourceConfigPropName() {
        // Name of the property whose value lists the Hadoop resource config
        // files to load (renamed from "hdfs.resource_config").
        return "hadoop.config.resources";
    }
 
     protected void checkRequiredProperties() {
         super.checkRequiredProperties();
+        checkConfigFileIsValid(hadoopConfigResources);
         if ("kerberos".equalsIgnoreCase(hdfsAuthenticationType)) {
             if (Strings.isNullOrEmpty(hdfsKerberosPrincipal)
                     || Strings.isNullOrEmpty(hdfsKerberosKeytab)) {
@@ -77,6 +78,13 @@ public class HDFSProperties extends StorageProperties {
         }
     }
 
    /**
     * Validates that the user-supplied Hadoop resource config, if any, can be loaded.
     * A no-op when no config file is configured.
     *
     * @param configFile the configured resource value; only used as a presence guard
     */
    private void checkConfigFileIsValid(String configFile) {
        if (Strings.isNullOrEmpty(configFile)) {
            return;
        }
        // NOTE(review): the configFile argument itself is not passed on; loading is
        // keyed by the property name, and loadConfigFromFile is expected to throw if
        // the referenced file is invalid. Confirm this indirection is intentional.
        loadConfigFromFile(getResourceConfigPropName());
    }
+
     public void toHadoopConfiguration(Configuration conf) {
         Map<String, String> allProps = 
loadConfigFromFile(getResourceConfigPropName());
         allProps.forEach(conf::set);
@@ -85,5 +93,8 @@ public class HDFSProperties extends StorageProperties {
             conf.set("hdfs.authentication.kerberos.principal", 
hdfsKerberosPrincipal);
             conf.set("hdfs.authentication.kerberos.keytab", 
hdfsKerberosKeytab);
         }
+        if (!Strings.isNullOrEmpty(hadoopUsername)) {
+            conf.set("hadoop.username", hadoopUsername);
+        }
     }
 }
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OBSProperties.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OBSProperties.java
new file mode 100644
index 00000000000..2e36386fb64
--- /dev/null
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OBSProperties.java
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import org.apache.doris.datasource.property.ConnectorProperty;
+
+import com.google.common.base.Strings;
+
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class OBSProperties extends AbstractObjectStorageProperties {
+
+    @ConnectorProperty(names = {"obs.endpoint"}, required = false, description 
= "The endpoint of OBS.")
+    protected String obsEndpoint = "obs.cn-east-3.myhuaweicloud.com";
+
+    @ConnectorProperty(names = {"obs.access_key"}, description = "The access 
key of OBS.")
+    protected String obsAccessKey = "";
+
+    @ConnectorProperty(names = {"obs.secret_key"}, description = "The secret 
key of OBS.")
+    protected String obsSecretKey = "";
+
+
+    private String region;
+
+    public OBSProperties(Map<String, String> origProps) {
+        super(Type.OBS, origProps);
+        // Initialize fields from origProps
+    }
+
+    @Override
+    public void toHadoopConfiguration(Map<String, String> config) {
+        config.put("fs.obs.endpoint", obsEndpoint);
+        config.put("fs.obs.access.key", obsAccessKey);
+        config.put("fs.obs.secret.key", obsSecretKey);
+        config.put("fs.obs.impl", "org.apache.hadoop.fs.obs.OBSFileSystem");
+        //set other k v if nessesary
+    }
+
+    @Override
+    public void toNativeS3Configuration(Map<String, String> config) {
+        config.putAll(generateAWSS3Properties(obsEndpoint, getRegion(), 
obsAccessKey, obsSecretKey));
+    }
+
+    private String getRegion() {
+        if (Strings.isNullOrEmpty(this.region) && 
obsEndpoint.contains("myhuaweicloud.com")) {
+            Pattern obsPattern = 
Pattern.compile("obs\\.([a-z0-9-]+)\\.myhuaweicloud\\.com");
+            Matcher matcher = obsPattern.matcher(obsEndpoint);
+            if (matcher.find()) {
+                this.region = matcher.group(1);
+            }
+        }
+        return this.region;
+    }
+}
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OSSProperties.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OSSProperties.java
new file mode 100644
index 00000000000..1498e4e7b1a
--- /dev/null
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/OSSProperties.java
@@ -0,0 +1,88 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import org.apache.doris.datasource.property.ConnectorProperty;
+
+import com.google.common.base.Strings;
+
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class OSSProperties extends AbstractObjectStorageProperties {
+    @ConnectorProperty(names = {"oss.endpoint"}, required = false, description 
= "The endpoint of OSS.")
+    protected String endpoint = "oss-cn-hangzhou.aliyuncs.com";
+
+    @ConnectorProperty(names = {"oss.access_key"}, description = "The access 
key of OSS.")
+    protected String accessKey = "";
+
+    @ConnectorProperty(names = {"oss.secret_key"}, description = "The secret 
key of OSS.")
+    protected String secretKey = "";
+
+    protected String region;
+
+
+    protected OSSProperties(Map<String, String> origProps) {
+        super(Type.OSS, origProps);
+    }
+
+    @Override
+    public void toHadoopConfiguration(Map<String, String> config) {
+        config.put("fs.oss.endpoint", endpoint);
+        config.put("fs.oss.accessKeyId", accessKey);
+        config.put("fs.oss.accessKeySecret", secretKey);
+        config.put("fs.oss.impl", 
"org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem");
+    }
+
+    @Override
+    public void toNativeS3Configuration(Map<String, String> config) {
+        config.putAll(generateAWSS3Properties(endpoint, getRegion(), 
accessKey, secretKey));
+    }
+
+    private String getRegion() {
+        // Return the region if it is already set
+        if (!Strings.isNullOrEmpty(this.region)) {
+            return region;
+        }
+
+        // Check for external endpoint and extract region
+        if (endpoint.contains("aliyuncs.com")) {
+            // Regex pattern for external endpoint (e.g., 
oss-<region>.aliyuncs.com)
+            Pattern ossPattern = 
Pattern.compile("oss-([a-z0-9-]+)\\.aliyuncs\\.com");
+            Matcher matcher = ossPattern.matcher(endpoint);
+            if (matcher.find()) {
+                this.region = matcher.group(1);
+            }
+        }
+
+        // Check for internal endpoint and extract region
+        if (endpoint.contains("intranet.aliyuncs.com")) {
+            // Regex pattern for internal endpoint (e.g., 
oss-<region>.intranet.aliyuncs.com)
+            Pattern ossIntranetPattern = 
Pattern.compile("oss-([a-z0-9-]+)\\.intranet\\.aliyuncs\\.com");
+            Matcher matcher = ossIntranetPattern.matcher(endpoint);
+            if (matcher.find()) {
+                this.region = matcher.group(1);
+            }
+        }
+
+        return this.region;
+    }
+
+
+}
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/ObjectStorageProperties.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/ObjectStorageProperties.java
new file mode 100644
index 00000000000..a05d606d6da
--- /dev/null
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/ObjectStorageProperties.java
@@ -0,0 +1,49 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import java.util.Map;
+
+/**
+ * Interface representing the properties and configurations for object storage 
systems.
+ * This interface provides methods for converting the storage properties to 
specific
+ * configurations for different protocols, such as Hadoop HDFS and AWS S3.
+ */
/**
 * Contract for object storage property holders (S3, OSS, OBS, COS, ...).
 *
 * <p>Implementations know how to render their credentials and endpoint settings
 * as configuration maps for the two access protocols Doris uses: the Hadoop
 * FileSystem API and the native AWS S3 API.
 */
public interface ObjectStorageProperties {

    /**
     * Renders this storage's settings as Hadoop FileSystem configuration
     * key/value pairs (e.g. {@code fs.<scheme>.endpoint}, credentials, and the
     * FileSystem implementation class), so Hadoop clients can access the store.
     *
     * @param config a map to populate with the Hadoop-compatible configuration
     *               parameters; existing entries are left untouched
     */
    void toHadoopConfiguration(Map<String, String> config);

    /**
     * Renders this storage's settings as AWS-S3-compatible configuration
     * key/value pairs, so S3 clients (or S3-compatible services) can access the
     * store via the S3 protocol.
     *
     * @param config a map to populate with the S3-compatible configuration
     *               parameters; existing entries are left untouched
     */
    void toNativeS3Configuration(Map<String, String> config);
}
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/S3Properties.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/S3Properties.java
index 6e396370829..4b1eb4e0012 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/S3Properties.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/S3Properties.java
@@ -29,27 +29,23 @@ import java.lang.reflect.Field;
 import java.util.List;
 import java.util.Map;
 
-public class S3Properties extends StorageProperties {
+public class S3Properties extends AbstractObjectStorageProperties {
 
-    @ConnectorProperty(names = {"s3.endpoint",
-            "oss.endpoint", "cos.endpoint", "obs.endpoint", "gcs.endpoint", 
"AWS_ENDPOINT"},
+    @ConnectorProperty(names = {"s3.endpoint", "AWS_ENDPOINT"},
             required = false,
             description = "The endpoint of S3.")
     protected String s3Endpoint = "";
 
-    @ConnectorProperty(names = {"s3.region",
-            "oss.region", "cos.region", "obs.region", "gcs.region", 
"AWS_REGION"},
+    @ConnectorProperty(names = {"s3.region", "AWS_REGION"},
             required = false,
             description = "The region of S3.")
     protected String s3Region = "";
 
-    @ConnectorProperty(names = {"s3.access_key",
-            "oss.access_key", "cos.access_key", "obs.access_key", 
"gcs.access_key", "AWS_ACCESS_KEY"},
+    @ConnectorProperty(names = {"s3.access_key", "AWS_ACCESS_KEY"},
             description = "The access key of S3.")
     protected String s3AccessKey = "";
 
-    @ConnectorProperty(names = {"s3.secret_key",
-            "oss.secret_key", "cos.secret_key", "obs.secret_key", 
"gcs.secret_key", "AWS_SECRET_KEY"},
+    @ConnectorProperty(names = {"s3.secret_key", "AWS_SECRET_KEY"},
             description = "The secret key of S3.")
     protected String s3SecretKey = "";
 
@@ -124,6 +120,7 @@ public class S3Properties extends StorageProperties {
     private static List<Field> getIdentifyFields() {
         List<Field> fields = Lists.newArrayList();
         try {
+            //todo AliyunDlfProperties should in OSS storage type.
             fields.add(S3Properties.class.getDeclaredField("s3Endpoint"));
             
fields.add(AliyunDLFProperties.class.getDeclaredField("dlfEndpoint"));
             
fields.add(AliyunDLFProperties.class.getDeclaredField("dlfRegion"));
@@ -156,14 +153,22 @@ public class S3Properties extends StorageProperties {
         catalogProps.put("s3.path-style-access", usePathStyle);
     }
 
-    public void toBackendS3ClientProperties(Map<String, String> s3Props) {
-        s3Props.put("AWS_ENDPOINT", s3Endpoint);
-        s3Props.put("AWS_REGION", s3Region);
-        s3Props.put("AWS_ACCESS_KEY", s3AccessKey);
-        s3Props.put("AWS_SECRET_KEY", s3SecretKey);
-        s3Props.put("AWS_MAX_CONNECTIONS", s3ConnectionMaximum);
-        s3Props.put("AWS_REQUEST_TIMEOUT_MS", s3ConnectionRequestTimeoutS);
-        s3Props.put("AWS_CONNECTION_TIMEOUT_MS", s3ConnectionTimeoutS);
-        s3Props.put("use_path_style", usePathStyle);
+    @Override
+    public void toHadoopConfiguration(Map<String, String> config) {
+        config.put("fs.s3a.access.key", s3AccessKey);  // AWS Access Key
+        config.put("fs.s3a.secret.key", s3SecretKey);  // AWS Secret Key
+        config.put("fs.s3a.endpoint", s3Endpoint);
+        config.put("fs.s3a.region", s3Region);
+        config.put("fs.s3a.connection.maximum", 
String.valueOf(s3ConnectionMaximum));
+        config.put("fs.s3a.connection.timeout", 
String.valueOf(s3ConnectionRequestTimeoutS));
+        config.put("fs.s3a.request.timeout", 
String.valueOf(s3ConnectionTimeoutS));
+        config.put("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem");
+    }
+
+    @Override
+    public void toNativeS3Configuration(Map<String, String> config) {
+        Map<String, String> awsS3Properties = 
generateAWSS3Properties(s3Endpoint, s3Region, s3AccessKey, s3SecretKey,
+                s3ConnectionMaximum, s3ConnectionRequestTimeoutS, 
s3ConnectionTimeoutS, usePathStyle);
+        config.putAll(awsS3Properties);
     }
 }
diff --git 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java
 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java
index 6bd1a58492c..8db195246e4 100644
--- 
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java
+++ 
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/StorageProperties.java
@@ -34,10 +34,16 @@ public class StorageProperties extends ConnectionProperties 
{
     public static final String FS_S3_SUPPORT = "fs.s3.support";
     public static final String FS_GCS_SUPPORT = "fs.gcs.support";
     public static final String FS_AZURE_SUPPORT = "fs.azure.support";
+    public static final String FS_OSS_SUPPORT = "fs.oss.support";
+    public static final String FS_OBS_SUPPORT = "fs.obs.support";
+    public static final String FS_COS_SUPPORT = "fs.cos.support";
 
    // Supported storage backend kinds.
    public enum Type {
        HDFS,
        S3,
        OSS,     // Alibaba Cloud OSS (aliyuncs.com endpoints)
        OBS,     // Huawei Cloud OBS (myhuaweicloud.com endpoints)
        COS,     // Tencent Cloud COS (myqcloud.com endpoints)
        UNKNOWN
    }
 
@@ -66,6 +72,18 @@ public class StorageProperties extends ConnectionProperties {
         if (isFsSupport(origProps, FS_S3_SUPPORT) || 
S3Properties.guessIsMe(origProps)) {
             storageProperties.add(new S3Properties(origProps));
         }
+        // FIXME: This logic directly checks for FS types (OSS, OBS, COS) 
here, which is intrusive.
+        // We should refactor this so that the plugins themselves provide a 
method to check if they are supported,
+        // thus decoupling the logic and making the system more extensible.
+        if (isFsSupport(origProps, FS_OSS_SUPPORT)) {
+            storageProperties.add(new OSSProperties(origProps));
+        }
+        if (isFsSupport(origProps, FS_OBS_SUPPORT)) {
+            storageProperties.add(new OBSProperties(origProps));
+        }
+        if (isFsSupport(origProps, FS_COS_SUPPORT)) {
+            storageProperties.add(new COSProperties(origProps));
+        }
 
         if (isFsSupport(origProps, FS_GCS_SUPPORT)) {
             throw new RuntimeException("Unsupported native GCS filesystem");
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSIntegrationTest.java
 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSIntegrationTest.java
new file mode 100644
index 00000000000..7c93bef6d76
--- /dev/null
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSIntegrationTest.java
@@ -0,0 +1,222 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import shade.doris.hive.org.apache.thrift.TException;
+
+import java.io.IOException;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.List;
+
+@Disabled
+public class HMSIntegrationTest {
+
+    // Hive configuration file path
+    private static final String HIVE_CONF_PATH = "";
+    // krb5 configuration file path
+    private static final String KRB5_CONF_PATH = "";
+    // Path to the Kerberos keytab file
+    private static final String KEYTAB_PATH = "";
+    // Principal name for Kerberos authentication
+    private static final String PRINCIPAL_NAME = "";
+
+    private static final String QUERY_DB_NAME = "";
+    private static final String QUERY_TBL_NAME = "";
+    private static final String CREATE_TBL_NAME = "";
+    private static final String CREATE_TBL_IN_DB_NAME = "";
+    // HDFS URI for the table location
+    private static final String HDFS_URI = "";
+    private static final boolean ENABLE_EXECUTE_CREATE_TABLE_TEST = false;
+
+    @Test
+    public  void testHms() throws IOException {
+        // Set up HiveConf and Kerberos authentication
+        HiveConf hiveConf = setupHiveConf();
+        setupKerberos(hiveConf);
+
+        // Authenticate user using the provided keytab file
+        UserGroupInformation ugi = authenticateUser();
+        System.out.println("User Credentials: " + ugi.getCredentials());
+
+        // Perform Hive MetaStore client operations
+        ugi.doAs((PrivilegedAction<Void>) () -> {
+            try {
+                HiveMetaStoreClient client = 
createHiveMetaStoreClient(hiveConf);
+
+                // Get database and table information
+                getDatabaseAndTableInfo(client);
+
+                // Create a new table in Hive
+                createNewTable(client);
+
+            } catch (TException e) {
+                throw new RuntimeException("HiveMetaStoreClient operation 
failed", e);
+            }
+            return null;
+        });
+    }
+
+    /**
+     * Sets up the HiveConf object by loading necessary configuration files.
+     *
+     * @return Configured HiveConf object
+     */
+    private static HiveConf setupHiveConf() {
+        HiveConf hiveConf = new HiveConf();
+        // Load the Hive configuration file
+        hiveConf.addResource(HIVE_CONF_PATH);
+        // Set Hive Metastore URIs and Kerberos principal
+        //if not in config-site
+        //hiveConf.set("hive.metastore.uris", "");
+        //hiveConf.set("hive.metastore.sasl.enabled", "true");
+        //hiveConf.set("hive.metastore.kerberos.principal", "");
+        return hiveConf;
+    }
+
+    /**
+     * Sets up Kerberos authentication properties in the HiveConf.
+     *
+     * @param hiveConf HiveConf object to update with Kerberos settings
+     */
+    private static void setupKerberos(HiveConf hiveConf) {
+        // Set the Kerberos configuration file path
+        System.setProperty("java.security.krb5.conf", KRB5_CONF_PATH);
+        // Enable Kerberos authentication for Hadoop
+        hiveConf.set("hadoop.security.authentication", "kerberos");
+        // Set the Hive configuration for Kerberos authentication
+        UserGroupInformation.setConfiguration(hiveConf);
+    }
+
+    /**
+     * Authenticates the user using Kerberos with a provided keytab file.
+     *
+     * @return Authenticated UserGroupInformation object
+     * @throws IOException If there is an error during authentication
+     */
+    private static UserGroupInformation authenticateUser() throws IOException {
+        return 
UserGroupInformation.loginUserFromKeytabAndReturnUGI(PRINCIPAL_NAME, 
KEYTAB_PATH);
+    }
+
+    /**
+     * Creates a new HiveMetaStoreClient using the provided HiveConf.
+     *
+     * @param hiveConf The HiveConf object with configuration settings
+     * @return A new instance of HiveMetaStoreClient
+     * @throws TException If there is an error creating the client
+     */
+    private static HiveMetaStoreClient createHiveMetaStoreClient(HiveConf 
hiveConf) throws TException {
+        return new HiveMetaStoreClient(hiveConf);
+    }
+
+    /**
+     * Retrieves database and table information from the Hive MetaStore.
+     *
+     * @param client The HiveMetaStoreClient used to interact with the 
MetaStore
+     * @throws TException If there is an error retrieving database or table 
info
+     */
+    private static void getDatabaseAndTableInfo(HiveMetaStoreClient client) 
throws TException {
+        // Retrieve and print the list of databases
+        System.out.println("Databases: " + client.getAllDatabases());
+        Table tbl = client.getTable(QUERY_DB_NAME, QUERY_TBL_NAME);
+        System.out.println(tbl);
+    }
+
+    /**
+     * Creates a new table in Hive with specified metadata.
+     *
+     * @param client The HiveMetaStoreClient used to create the table
+     * @throws TException If there is an error creating the table
+     */
+    private static void createNewTable(HiveMetaStoreClient client) throws 
TException {
+        if (!ENABLE_EXECUTE_CREATE_TABLE_TEST) {
+            return;
+        }
+        // Create StorageDescriptor for the table
+        StorageDescriptor storageDescriptor = createTableStorageDescriptor();
+
+        // Create the table object and set its properties
+        Table table = new Table();
+        table.setDbName(CREATE_TBL_IN_DB_NAME);
+        table.setTableName(CREATE_TBL_NAME);
+        table.setPartitionKeys(createPartitionColumns());
+        table.setSd(storageDescriptor);
+
+        // Create the table in the Hive MetaStore
+        client.createTable(table);
+        System.out.println("Table 'exampletable' created successfully.");
+    }
+
+    /**
+     * Creates the StorageDescriptor for a table, which includes columns and 
location.
+     *
+     * @return A StorageDescriptor object containing table metadata
+     */
+    private static StorageDescriptor createTableStorageDescriptor() {
+        // Define the table columns
+        List<FieldSchema> columns = new ArrayList<>();
+        columns.add(new FieldSchema("id", "int", "ID column"));
+        columns.add(new FieldSchema("name", "string", "Name column"));
+        columns.add(new FieldSchema("age", "int", "Age column"));
+
+        // Create and configure the StorageDescriptor for the table
+        StorageDescriptor storageDescriptor = new StorageDescriptor();
+        storageDescriptor.setCols(columns);
+        storageDescriptor.setLocation(HDFS_URI);
+
+        // Configure SerDe for the table
+        SerDeInfo serDeInfo = createSerDeInfo();
+        storageDescriptor.setSerdeInfo(serDeInfo);
+
+        return storageDescriptor;
+    }
+
+    /**
+     * Creates the SerDeInfo object for the table, which defines how data is 
serialized and deserialized.
+     *
+     * @return A SerDeInfo object with the specified serialization settings
+     */
+    private static SerDeInfo createSerDeInfo() {
+        SerDeInfo serDeInfo = new SerDeInfo();
+        serDeInfo.setName("example_serde");
+        
serDeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
+        return serDeInfo;
+    }
+
+    /**
+     * Creates the partition columns for the table.
+     *
+     * @return A list of FieldSchema objects representing partition columns
+     */
+    private static List<FieldSchema> createPartitionColumns() {
+        List<FieldSchema> partitionColumns = new ArrayList<>();
+        partitionColumns.add(new FieldSchema("year", "int", "Year partition"));
+        partitionColumns.add(new FieldSchema("month", "int", "Month 
partition"));
+        return partitionColumns;
+    }
+}
+
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSPropertiesTest.java
 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSPropertiesTest.java
new file mode 100644
index 00000000000..a51e3e98b33
--- /dev/null
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/metastore/HMSPropertiesTest.java
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.metastore;
+
+import org.apache.doris.common.Config;
+
+import org.apache.paimon.options.Options;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Map;
+
+public class HMSPropertiesTest {
+
+    @Test
+    public void testHiveConfDirNotExist() {
+        Map<String, String> params = new HashMap<>();
+        params.put("hive.conf.resources", "/opt/hive-site.xml");
+        params.put("metastore.type", "hms");
+        Map<String, String> finalParams = params;
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
MetastoreProperties.create(finalParams));
+    }
+
+    @Test
+    public void testHiveConfDirExist() {
+        URL hiveFileUrl = 
HMSPropertiesTest.class.getClassLoader().getResource("plugins");
+        Config.hadoop_config_dir = hiveFileUrl.getPath().toString();
+        Map<String, String> params = new HashMap<>();
+        params.put("hive.conf.resources", "hive-conf/hive1/hive-site.xml");
+        params.put("metastore.type", "hms");
+        HMSProperties hmsProperties;
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
MetastoreProperties.create(params));
+        params.put("hive.metastore.uris", "thrift://default:9083");
+        hmsProperties = (HMSProperties) MetastoreProperties.create(params);
+        Map<String, String> hiveConf = 
hmsProperties.loadConfigFromFile("hive.conf.resources");
+        Assertions.assertNotNull(hiveConf);
+        Assertions.assertEquals("/user/hive/default", 
hiveConf.get("hive.metastore.warehouse.dir"));
+    }
+
+    @Test
+    public void testBasicParamsTest() {
+        // Step 1: Set up initial parameters for HMSProperties
+        Map<String, String> params = createBaseParams();
+
+        // Step 2: Test HMSProperties to PaimonOptions and Conf conversion
+        HMSProperties hmsProperties = getHMSProperties(params);
+        testHmsToPaimonOptions(hmsProperties);
+
+        // Step 3: Test HMSProperties to Iceberg Hive Catalog properties 
conversion
+        testHmsToIcebergHiveCatalog(hmsProperties);
+
+        // Step 4: Test invalid scenario when both SASL and kerberos are 
enabled
+        params.put("hive.metastore.sasl.enabled", "true");
+        params.put("hive.metastore.authentication.type", "kerberos");
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
MetastoreProperties.create(params));
+    }
+
+    private Map<String, String> createBaseParams() {
+        Map<String, String> params = new HashMap<>();
+        params.put("metastore.type", "hms");
+        params.put("hive.metastore.uris", "thrift://127.0.0.1:9083");
+        params.put("hive.metastore.authentication.type", "simple");
+        return params;
+    }
+
+    private HMSProperties getHMSProperties(Map<String, String> params) {
+        return (HMSProperties) MetastoreProperties.create(params);
+    }
+
+    private void testHmsToPaimonOptions(HMSProperties hmsProperties) {
+        Options paimonOptions = new Options();
+        hmsProperties.toPaimonOptionsAndConf(paimonOptions);
+        Assertions.assertEquals("thrift://127.0.0.1:9083", 
paimonOptions.get("uri"));
+    }
+
+    private void testHmsToIcebergHiveCatalog(HMSProperties hmsProperties) {
+        Map<String, String> icebergMSParams = new HashMap<>();
+        hmsProperties.toIcebergHiveCatalogProperties(icebergMSParams);
+        Assertions.assertEquals("thrift://127.0.0.1:9083", 
icebergMSParams.get("uri"));
+    }
+}
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/COSPropertiesTest.java
 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/COSPropertiesTest.java
new file mode 100644
index 00000000000..46d63b33209
--- /dev/null
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/COSPropertiesTest.java
@@ -0,0 +1,138 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class COSPropertiesTest {
+    private Map<String, String> origProps;
+
+    private static String secretKey = "";
+    private static String accessKey = "";
+    private static String hdfsPath = "";
+
+    @BeforeEach
+    public void setUp() {
+        origProps = new HashMap<>();
+    }
+
+    @Test
+    public void testCOSProperties() {
+        origProps.put("cos.endpoint", "https://cos.example.com");
+        origProps.put("cos.access_key", "myCOSAccessKey");
+        origProps.put("cos.secret_key", "myCOSSecretKey");
+        origProps.put("cos.region", "us-west-1");
+        origProps.put("cos.max_connections", "100");
+        origProps.put("cos.request_timeout", "3000");
+        origProps.put("cos.connection_timeout", "1000");
+        origProps.put("cos.use_path_style", "true");
+        origProps.put(StorageProperties.FS_COS_SUPPORT, "true");
+        COSProperties cosProperties = (COSProperties) 
StorageProperties.create(origProps).get(1);
+        Map<String, String> config = new HashMap<>();
+        cosProperties.toHadoopConfiguration(config);
+
+        // Validate the configuration
+        Assertions.assertEquals("https://cos.example.com", 
config.get("fs.cos.endpoint"));
+        Assertions.assertEquals("myCOSAccessKey", 
config.get("fs.cosn.userinfo.secretId"));
+        Assertions.assertEquals("myCOSSecretKey", 
config.get("fs.cosn.userinfo.secretKey"));
+        origProps = new HashMap<>();
+        origProps.put("cos.endpoint", "https://cos.example.com");
+        origProps.put(StorageProperties.FS_COS_SUPPORT, "true");
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
StorageProperties.create(origProps), "Property cos.access_key is required.");
+        origProps.put("cos.access_key", "myCOSAccessKey");
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
StorageProperties.create(origProps), "Property cos.secret_key is required.");
+        origProps.put("cos.secret_key", "myCOSSecretKey");
+        //no any exception
+        StorageProperties.create(origProps);
+    }
+
+    @Test
+    public void testToNativeS3Configuration() {
+        origProps.put("cos.endpoint", "cos.ap-beijing.myqcloud.com");
+        origProps.put("cos.access_key", "myCOSAccessKey");
+        origProps.put("cos.secret_key", "myCOSSecretKey");
+        origProps.put(StorageProperties.FS_COS_SUPPORT, "true");
+        //origProps.put("cos.region", "ap-beijing");
+
+        COSProperties cosProperties = (COSProperties) 
StorageProperties.create(origProps).get(1);
+        Map<String, String> s3Props = new HashMap<>();
+        cosProperties.toNativeS3Configuration(s3Props);
+
+        // Validate the S3 properties
+        Assertions.assertEquals("cos.ap-beijing.myqcloud.com", 
s3Props.get("AWS_ENDPOINT"));
+        Assertions.assertEquals("ap-beijing", s3Props.get("AWS_REGION"));
+        Assertions.assertEquals("myCOSAccessKey", 
s3Props.get("AWS_ACCESS_KEY"));
+        Assertions.assertEquals("myCOSSecretKey", 
s3Props.get("AWS_SECRET_KEY"));
+        // Add any additional assertions for other properties if needed
+    }
+
+    /**
+     * This test method is used for verifying the connectivity and integration 
between
+     * the COS (Cloud Object Storage) and HDFS (Hadoop Distributed File 
System) by
+     * setting COS-specific properties and testing the ability to list files 
from an
+     * HDFS path.
+     * <p>
+     * The method:
+     * 1. Sets COS properties such as endpoint, access key, and secret key.
+     * 2. Converts COS properties to HDFS configuration.
+     * 3. Uses the HDFS configuration to connect to the file system.
+     * 4. Lists the files in the specified HDFS path and prints the file paths 
to the console.
+     * <p>
+     * Note:
+     * This test is currently disabled (@Disabled) and will not be executed 
unless enabled.
+     * The test requires valid COS credentials (access key and secret key) and 
a valid
+     * HDFS path to function correctly.
+     *
+     * @throws URISyntaxException if the URI for the HDFS path is malformed.
+     * @throws IOException        if there are issues with file system access 
or COS properties.
+     */
+    @Disabled
+    @Test
+    public void testCOSHdfsPropertiesTest() throws URISyntaxException, 
IOException {
+        origProps.put("cos.endpoint", "cos.ap-beijing.myqcloud.com");
+        origProps.put("cos.access_key", accessKey);
+        origProps.put("cos.secret_key", secretKey);
+        origProps.put(StorageProperties.FS_COS_SUPPORT, "true");
+        COSProperties cosProperties = (COSProperties) 
StorageProperties.create(origProps).get(1);
+
+        Map<String, String> hdfsParams = new HashMap<>();
+        cosProperties.toHadoopConfiguration(hdfsParams);
+        Configuration configuration = new Configuration(false);
+        for (Map.Entry<String, String> entry : hdfsParams.entrySet()) {
+            configuration.set(entry.getKey(), entry.getValue());
+        }
+        FileSystem fs = FileSystem.get(new URI(hdfsPath), configuration);
+        FileStatus[] fileStatuses = fs.listStatus(new Path(hdfsPath));
+        for (FileStatus status : fileStatuses) {
+            System.out.println("File Path: " + status.getPath());
+        }
+    }
+}
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HDFSPropertiesTest.java
 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HDFSPropertiesTest.java
index e99f725c2a1..b08e15ce7dc 100644
--- 
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HDFSPropertiesTest.java
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HDFSPropertiesTest.java
@@ -17,11 +17,14 @@
 
 package org.apache.doris.datasource.property.storage;
 
+import org.apache.doris.common.Config;
+
 import com.google.common.collect.Maps;
 import org.apache.hadoop.conf.Configuration;
 import org.junit.Test;
 import org.junit.jupiter.api.Assertions;
 
+import java.net.URL;
 import java.util.List;
 import java.util.Map;
 
@@ -30,44 +33,84 @@ public class HDFSPropertiesTest {
 
     @Test
     public void testBasicHdfsCreate() {
-        Map<String, String> origProps = Maps.newHashMap();
-        origProps.put(StorageProperties.FS_HDFS_SUPPORT, "true");
-        //tobe fixed
-        /*
-        Assertions.assertThrowsExactly(IllegalArgumentException.class, () -> {
-            StorageProperties.create(origProps);
-        },"Property hadoop.username is required.");
-        origProps.put("hadoop.username", "hadoop");
-        Assertions.assertThrowsExactly(IllegalArgumentException.class, () -> {
-            StorageProperties.create(origProps);
-        },"Property hadoop.config.resources is required.");
-        origProps.put("hadoop.config.resources", "/hadoop-test/");
-        */
+        // Test 1: Check default authentication type (should be "simple")
+        Map<String, String> origProps = createBaseHdfsProperties();
         List<StorageProperties> storageProperties = 
StorageProperties.create(origProps);
         HDFSProperties hdfsProperties = (HDFSProperties) 
storageProperties.get(0);
         Configuration conf = new Configuration();
         hdfsProperties.toHadoopConfiguration(conf);
         Assertions.assertEquals("simple", 
conf.get("hadoop.security.authentication"));
+
+        // Test 2: Kerberos without necessary configurations (should throw 
exception)
+        origProps.put("hdfs.authentication.type", "kerberos");
+        assertKerberosConfigException(origProps, "HDFS authentication type is 
kerberos, but principal or keytab is not set");
+
+        // Test 3: Kerberos with missing principal (should throw exception)
+        origProps.put("hdfs.authentication.kerberos.principal", "hadoop");
+        assertKerberosConfigException(origProps, "HDFS authentication type is 
kerberos, but principal or keytab is not set");
+
+        // Test 4: Kerberos with complete config (should succeed)
+        origProps.put("hdfs.authentication.kerberos.keytab", "keytab");
+        HDFSProperties properties = (HDFSProperties) 
StorageProperties.create(origProps).get(0);  // No exception expected
+        Configuration configuration = new Configuration(false);
+        properties.toHadoopConfiguration(configuration);
+        Assertions.assertEquals("kerberos", 
configuration.get("hdfs.authentication.type"));
+        Assertions.assertEquals("hadoop", 
configuration.get("hdfs.authentication.kerberos.principal"));
+        Assertions.assertEquals("keytab", 
configuration.get("hdfs.authentication.kerberos.keytab"));
     }
 
     @Test
-    public void testBasicS3Create() {
+    public void testBasicHdfsPropertiesCreateByConfigFile() {
+        // Test 1: Check loading of config resources
+        Map<String, String> origProps = createBaseHdfsProperties();
+        URL hiveFileUrl = 
HDFSPropertiesTest.class.getClassLoader().getResource("plugins");
+        Config.hadoop_config_dir = hiveFileUrl.getPath().toString() + 
"/hadoop_conf";
+        origProps.put("hadoop.config.resources", 
"hadoop/core-site.xml,hadoop/hdfs-site.xml");
+
+        // Test 2: Missing config resources (should throw exception)
+        assertConfigResourceException(origProps, "Config resource file does 
not exist");
+
+        // Test 3: Valid config resources (should succeed)
+        origProps.put("hadoop.config.resources", 
"hadoop1/core-site.xml,hadoop1/hdfs-site.xml");
+        List<StorageProperties> storageProperties = 
StorageProperties.create(origProps);
+        HDFSProperties hdfsProperties = (HDFSProperties) 
storageProperties.get(0);
+        Configuration conf = new Configuration();
+        hdfsProperties.toHadoopConfiguration(conf);
+        Assertions.assertEquals("hdfs://localhost:9000", 
conf.get("fs.defaultFS"));
+        Assertions.assertEquals("ns1", conf.get("dfs.nameservices"));
+
+        // Test 4: Kerberos without necessary configurations (should throw 
exception)
+        origProps.put("hdfs.authentication.type", "kerberos");
+        assertKerberosConfigException(origProps, "HDFS authentication type is 
kerberos, but principal or keytab is not set");
+
+        // Test 5: Kerberos with missing principal (should throw exception)
+        origProps.put("hdfs.authentication.kerberos.principal", "hadoop");
+        assertKerberosConfigException(origProps, "HDFS authentication type is 
kerberos, but principal or keytab is not set");
+
+        // Test 6: Kerberos with complete config (should succeed)
+        origProps.put("hdfs.authentication.kerberos.keytab", "keytab");
+        hdfsProperties = (HDFSProperties) 
StorageProperties.create(origProps).get(0);  // No exception expected
+        Configuration configuration = new Configuration(false);
+        hdfsProperties.toHadoopConfiguration(configuration);
+        Assertions.assertEquals("kerberos", 
configuration.get("hdfs.authentication.type"));
+        Assertions.assertEquals("hadoop", 
configuration.get("hdfs.authentication.kerberos.principal"));
+        Assertions.assertEquals("keytab", 
configuration.get("hdfs.authentication.kerberos.keytab"));
+        Assertions.assertEquals("hdfs://localhost:9000", 
configuration.get("fs.defaultFS"));
+
+    }
+
+    // Helper methods to reduce code duplication
+    private Map<String, String> createBaseHdfsProperties() {
         Map<String, String> origProps = Maps.newHashMap();
-        origProps.put(StorageProperties.FS_S3_SUPPORT, "true");
-        //fixme s3 properties don't need hadoop.config.resources and 
hadoop.username
-        origProps.put("hadoop.config.resources", "/hadoop-test/");
-        origProps.put("hadoop.username", "hadoop");
-        Assertions.assertThrowsExactly(IllegalArgumentException.class, () -> {
-            StorageProperties.create(origProps);
-        }, "Property s3.access_key is required.");
-        // s3 properties
-        origProps.put("s3.access_key", "access_key");
-        Assertions.assertThrowsExactly(IllegalArgumentException.class, () -> {
-            StorageProperties.create(origProps);
-        }, "Property s3.secret.key is required.");
-        origProps.put("s3.secret_key", "secret_key");
-        S3Properties s3Properties = (S3Properties) 
StorageProperties.create(origProps).get(1);
-        s3Properties.toBackendS3ClientProperties(origProps);
+        origProps.put(StorageProperties.FS_HDFS_SUPPORT, "true");
+        return origProps;
     }
 
+    private void assertKerberosConfigException(Map<String, String> origProps, 
String expectedMessage) {
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
StorageProperties.create(origProps), expectedMessage);
+    }
+
+    private void assertConfigResourceException(Map<String, String> origProps, 
String expectedMessage) {
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
StorageProperties.create(origProps), expectedMessage);
+    }
 }
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OBSPropertyTest.java
 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OBSPropertyTest.java
new file mode 100644
index 00000000000..90742e61783
--- /dev/null
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OBSPropertyTest.java
@@ -0,0 +1,126 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class OBSPropertyTest {
+    private Map<String, String> origProps = new HashMap<>();
+
+    @Test
+    public void testBasicCreateTest() {
+        //Map<String, String> origProps = new HashMap<>();
+        origProps.put("obs.endpoint", "https://obs.example.com");
+        origProps.put("obs.access_key", "myOBSAccessKey");
+        origProps.put("obs.secret_key", "myOBSSecretKey");
+        origProps.put(StorageProperties.FS_OBS_SUPPORT, "true");
+
+        ObjectStorageProperties properties = (ObjectStorageProperties) 
StorageProperties.create(origProps).get(1);
+        properties.toHadoopConfiguration(origProps);
+
+        Assertions.assertEquals("https://obs.example.com", 
origProps.get("fs.obs.endpoint"));
+        Assertions.assertEquals("myOBSAccessKey", 
origProps.get("fs.obs.access.key"));
+        Assertions.assertEquals("myOBSSecretKey", 
origProps.get("fs.obs.secret.key"));
+        Assertions.assertEquals("org.apache.hadoop.fs.obs.OBSFileSystem", 
origProps.get("fs.obs.impl"));
+
+        // Test creation without additional properties
+        origProps = new HashMap<>();
+        origProps.put("obs.endpoint", "https://obs.example.com");
+        origProps.put(StorageProperties.FS_OBS_SUPPORT, "true");
+
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
StorageProperties.create(origProps), "Property obs.access_key is required.");
+        origProps.put("obs.access_key", "myOBSAccessKey");
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
StorageProperties.create(origProps), "Property obs.secret_key is required.");
+        origProps.put("obs.secret_key", "myOBSSecretKey");
+        StorageProperties.create(origProps);
+    }
+
+    @Test
+    public void testToNativeS3Configuration() {
+        origProps.put("obs.access_key", "myOBSAccessKey");
+        origProps.put("obs.secret_key", "myOBSSecretKey");
+        origProps.put("obs.endpoint", "obs.cn-north-4.myhuaweicloud.com");
+        origProps.put(StorageProperties.FS_OBS_SUPPORT, "true");
+        OBSProperties obsProperties = (OBSProperties) 
StorageProperties.create(origProps).get(1);
+        Map<String, String> s3Props = new HashMap<>();
+
+
+        obsProperties.toNativeS3Configuration(s3Props);
+        Assertions.assertEquals("obs.cn-north-4.myhuaweicloud.com", 
s3Props.get("AWS_ENDPOINT"));
+        Assertions.assertEquals("cn-north-4", s3Props.get("AWS_REGION"));
+        Assertions.assertEquals("myOBSAccessKey", 
s3Props.get("AWS_ACCESS_KEY"));
+        Assertions.assertEquals("myOBSSecretKey", 
s3Props.get("AWS_SECRET_KEY"));
+    }
+
+    private static String obsAccessKey = "";
+    private static String obsSecretKey = "";
+    private static String hdfsPath = "";
+
+    /**
+     * This test method verifies the integration of OBS (Object Storage 
Service) with HDFS
+     * by setting OBS-specific properties and testing the ability to list 
files from an
+     * HDFS path. It demonstrates how OBS properties can be converted into 
HDFS configuration
+     * settings and used to interact with HDFS.
+     * <p>
+     * The method:
+     * 1. Sets OBS properties such as access key, secret key, and endpoint.
+     * 2. Converts OBS properties to HDFS configuration using the 
`toHadoopConfiguration()` method.
+     * 3. Uses the HDFS configuration to connect to the file system.
+     * 4. Lists the files in the specified HDFS path and prints the file paths 
to the console.
+     * <p>
+     * Note:
+     * This test is currently disabled (@Disabled) and will not be executed 
unless enabled.
+     * The test requires valid OBS credentials (access key and secret key) and 
a valid
+     * HDFS path to function correctly.
+     *
+     * @throws URISyntaxException if the URI for the HDFS path is malformed.
+     * @throws IOException        if there are issues with file system access 
or OBS properties.
+     */
+    @Disabled
+    @Test
+    public void testToHadoopConfiguration() throws URISyntaxException, 
IOException {
+        origProps.put("obs.access_key", obsAccessKey);
+        origProps.put("obs.secret_key", obsSecretKey);
+        origProps.put("obs.endpoint", "obs.cn-north-4.myhuaweicloud.com");
+        origProps.put(StorageProperties.FS_OBS_SUPPORT, "true");
+        OBSProperties obsProperties = (OBSProperties) 
StorageProperties.create(origProps).get(1);
+        Map<String, String> hdfsParams = new HashMap<>();
+        obsProperties.toHadoopConfiguration(hdfsParams);
+        Configuration configuration = new Configuration(false);
+        for (Map.Entry<String, String> entry : hdfsParams.entrySet()) {
+            configuration.set(entry.getKey(), entry.getValue());
+        }
+        FileSystem fs = FileSystem.get(new URI(hdfsPath), configuration);
+        FileStatus[] fileStatuses = fs.listStatus(new Path(hdfsPath));
+        for (FileStatus status : fileStatuses) {
+            System.out.println("File Path: " + status.getPath());
+        }
+    }
+}
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OSSPropertiesTest.java
 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OSSPropertiesTest.java
new file mode 100644
index 00000000000..097b002f5c8
--- /dev/null
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/OSSPropertiesTest.java
@@ -0,0 +1,120 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class OSSPropertiesTest {
+
+    private static String ossAccessKey = "";
+    private static String ossSecretKey = "";
+    private static String hdfsPath = "";
+
+    @Test
+    public void testBasicCreateTest() {
+        Map<String, String> origProps = new HashMap<>();
+        origProps.put("oss.endpoint", "https://oss.aliyuncs.com");
+        origProps.put("oss.access_key", "myOSSAccessKey");
+        origProps.put("oss.secret_key", "myOSSSecretKey");
+        origProps.put(StorageProperties.FS_OSS_SUPPORT, "true");
+        ObjectStorageProperties properties = (ObjectStorageProperties) 
StorageProperties.create(origProps).get(1);
+        properties.toHadoopConfiguration(origProps);
+        Assertions.assertEquals("https://oss.aliyuncs.com", 
origProps.get("fs.oss.endpoint"));
+        Assertions.assertEquals("myOSSAccessKey", 
origProps.get("fs.oss.accessKeyId"));
+        Assertions.assertEquals("myOSSSecretKey", 
origProps.get("fs.oss.accessKeySecret"));
+        
Assertions.assertEquals("org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem", 
origProps.get("fs.oss.impl"));
+        origProps = new HashMap<>();
+        origProps.put("oss.endpoint", "https://oss.aliyuncs.com");
+        StorageProperties.create(origProps);
+    }
+
+
+    @Test
+    public void testToNativeS3Configuration() {
+        Map<String, String> origProps = new HashMap<>();
+        origProps.put("oss.access_key", "myOSSAccessKey");
+        origProps.put("oss.secret_key", "myOSSSecretKey");
+        origProps.put("oss.endpoint", "oss-cn-beijing-internal.aliyuncs.com");
+        origProps.put(StorageProperties.FS_OSS_SUPPORT, "true");
+        OSSProperties ossProperties = (OSSProperties) 
StorageProperties.create(origProps).get(1);
+        Map<String, String> s3Props = new HashMap<>();
+
+
+        ossProperties.toNativeS3Configuration(s3Props);
+        Assertions.assertEquals("oss-cn-beijing-internal.aliyuncs.com", 
s3Props.get("AWS_ENDPOINT"));
+        Assertions.assertEquals("cn-beijing-internal", 
s3Props.get("AWS_REGION"));
+        Assertions.assertEquals("myOSSAccessKey", 
s3Props.get("AWS_ACCESS_KEY"));
+        Assertions.assertEquals("myOSSSecretKey", 
s3Props.get("AWS_SECRET_KEY"));
+    }
+
+
+    /**
+     * This test method verifies the integration between OSS (Object Storage 
Service)
+     * and HDFS by setting OSS-specific properties and testing the ability to 
list
+     * files from an HDFS path. It demonstrates how OSS properties can be 
converted
+     * into Hadoop configuration settings and used to interact with HDFS.
+     * <p>
+     * The method:
+     * 1. Sets OSS properties such as access key, secret key, and endpoint.
+     * 2. Converts OSS properties to HDFS configuration using the 
`toHadoopConfiguration()` method.
+     * 3. Uses the HDFS configuration to connect to the file system.
+     * 4. Lists the files in the specified HDFS path and prints the file paths 
to the console.
+     * <p>
+     * Note:
+     * This test is currently disabled (@Disabled) and will not be executed 
unless enabled.
+     * The test requires valid OSS credentials (access key and secret key) and 
a valid
+     * HDFS path to function correctly.
+     *
+     * @throws URISyntaxException if the URI for the HDFS path is malformed.
+     * @throws IOException        if there are issues with file system access 
or OSS properties.
+     */
+    @Disabled
+    @Test
+    public void testOSSHdfsProperties() throws IOException, URISyntaxException 
{
+        Map<String, String> origProps = new HashMap<>();
+        origProps.put("oss.access_key", ossAccessKey);
+        origProps.put("oss.secret_key", ossSecretKey);
+        origProps.put("oss.endpoint", "oss-cn-beijing-internal.aliyuncs.com");
+        origProps.put(StorageProperties.FS_OSS_SUPPORT, "true");
+        OSSProperties ossProperties = (OSSProperties) 
StorageProperties.create(origProps).get(1);
+        // ossParams.put("fs.AbstractFileSystem.oss.impl", 
"com.aliyun.jindodata.oss.JindoOSS");
+        Map<String, String> hadoopParams = new HashMap<>();
+        ossProperties.toHadoopConfiguration(hadoopParams);
+        Configuration configuration = new Configuration(false);
+        for (Map.Entry<String, String> entry : hadoopParams.entrySet()) {
+            configuration.set(entry.getKey(), entry.getValue());
+        }
+        FileSystem fs = FileSystem.get(new URI(hdfsPath), configuration);
+        FileStatus[] fileStatuses = fs.listStatus(new Path(hdfsPath));
+        for (FileStatus status : fileStatuses) {
+            System.out.println("File Path: " + status.getPath());
+        }
+    }
+}
diff --git 
a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/S3PropertiesTest.java
 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/S3PropertiesTest.java
new file mode 100644
index 00000000000..83eb06afc8b
--- /dev/null
+++ 
b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/S3PropertiesTest.java
@@ -0,0 +1,134 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class S3PropertiesTest {
+    private Map<String, String> origProps;
+
+    private static String secretKey = "";
+    private static String accessKey = "";
+    private static String hdfsPath = "";
+
+    @BeforeEach
+    public void setUp() {
+        origProps = new HashMap<>();
+    }
+
+    @Test
+    public void testS3Properties() {
+        origProps.put("s3.endpoint", "https://cos.example.com");
+        origProps.put("s3.access_key", "myS3AccessKey");
+        origProps.put("s3.secret_key", "myS3SecretKey");
+        origProps.put("s3.region", "us-west-1");
+        origProps.put(StorageProperties.FS_S3_SUPPORT, "true");
+        S3Properties s3Properties = (S3Properties) 
StorageProperties.create(origProps).get(1);
+        Map<String, String> config = new HashMap<>();
+        s3Properties.toHadoopConfiguration(config);
+
+        // Validate the configuration
+        Assertions.assertEquals("myS3AccessKey", 
config.get("fs.s3a.access.key"));
+        Assertions.assertEquals("myS3SecretKey", 
config.get("fs.s3a.secret.key"));
+        Assertions.assertEquals("us-west-1", config.get("fs.s3a.region"));
+        origProps = new HashMap<>();
+        origProps.put("s3.endpoint", "https://s3.example.com");
+        origProps.put(StorageProperties.FS_S3_SUPPORT, "true");
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
StorageProperties.create(origProps), "Property cos.access_key is required.");
+        origProps.put("s3.access_key", "myS3AccessKey");
+        Assertions.assertThrows(IllegalArgumentException.class, () -> 
StorageProperties.create(origProps), "Property cos.secret_key is required.");
+        origProps.put("s3.secret_key", "myS3SecretKey");
+        StorageProperties.create(origProps);
+    }
+
+    @Test
+    public void testToNativeS3Configuration() {
+        origProps.put("s3.endpoint", "https://cos.example.com");
+        origProps.put("s3.access_key", "myS3AccessKey");
+        origProps.put("s3.secret_key", "myS3SecretKey");
+        origProps.put("s3.region", "us-west-1");
+        origProps.put(StorageProperties.FS_S3_SUPPORT, "true");
+
+        S3Properties s3Properties = (S3Properties) 
StorageProperties.create(origProps).get(1);
+        Map<String, String> s3Props = new HashMap<>();
+        s3Properties.toNativeS3Configuration(s3Props);
+
+        // Validate the S3 properties
+        Assertions.assertEquals("https://cos.example.com", 
s3Props.get("AWS_ENDPOINT"));
+        Assertions.assertEquals("us-west-1", s3Props.get("AWS_REGION"));
+        Assertions.assertEquals("myS3AccessKey", 
s3Props.get("AWS_ACCESS_KEY"));
+        Assertions.assertEquals("myS3SecretKey", 
s3Props.get("AWS_SECRET_KEY"));
+        // Add any additional assertions for other properties if needed
+    }
+
+    /**
+     * This test method verifies the integration between S3 (Amazon Simple 
Storage Service)
+     * and HDFS by setting S3-specific properties and testing the ability to 
list files
+     * from an HDFS path. It demonstrates how S3 properties can be converted 
into
+     * Hadoop configuration settings and used to interact with HDFS.
+     * <p>
+     * The method:
+     * 1. Sets S3 properties such as access key, secret key, endpoint, and 
region.
+     * 2. Converts S3 properties to HDFS configuration using the 
`toHadoopConfiguration()` method.
+     * 3. Uses the HDFS configuration to connect to the file system.
+     * 4. Lists the files in the specified HDFS path and prints the file paths 
to the console.
+     * <p>
+     * Note:
+     * This test is currently disabled (@Disabled) and will not be executed 
unless enabled.
+     * The test requires valid S3 credentials (access key and secret key) and 
a valid
+     * HDFS path to function correctly.
+     *
+     * @throws URISyntaxException if the URI for the HDFS path is malformed.
+     * @throws IOException        if there are issues with file system access 
or S3 properties.
+     */
+    @Disabled
+    @Test
+    public void testS3HdfsPropertiesTest() throws URISyntaxException, 
IOException {
+        origProps.put("s3.endpoint", "s3.ap-northeast-1.amazonaws.com");
+        origProps.put("s3.access_key", accessKey);
+        origProps.put("s3.secret_key", secretKey);
+        origProps.put("s3.region", "ap-northeast-1");
+        origProps.put(StorageProperties.FS_S3_SUPPORT, "true");
+        S3Properties s3Properties = (S3Properties) 
StorageProperties.create(origProps).get(1);
+
+        Map<String, String> hdfsParams = new HashMap<>();
+        s3Properties.toHadoopConfiguration(hdfsParams);
+        Configuration configuration = new Configuration(false);
+        for (Map.Entry<String, String> entry : hdfsParams.entrySet()) {
+            configuration.set(entry.getKey(), entry.getValue());
+        }
+        FileSystem fs = FileSystem.get(new URI(hdfsPath), configuration);
+        FileStatus[] fileStatuses = fs.listStatus(new Path(hdfsPath));
+        for (FileStatus status : fileStatuses) {
+            System.out.println("File Path: " + status.getPath());
+        }
+    }
+}
diff --git 
a/fe/fe-core/src/test/resources/plugins/hadoop_conf/hadoop1/core-site.xml 
b/fe/fe-core/src/test/resources/plugins/hadoop_conf/hadoop1/core-site.xml
new file mode 100644
index 00000000000..60afa0094d5
--- /dev/null
+++ b/fe/fe-core/src/test/resources/plugins/hadoop_conf/hadoop1/core-site.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+    <!-- Hadoop core-site test fixture: minimal settings used by the HMS
+         configuration-loading unit tests (plugins/hadoop_conf/hadoop1). -->
+
+    <!-- Specify the URI for the NameNode (default filesystem) -->
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://localhost:9000</value>
+    </property>
+
+    <!-- Hadoop temporary directory for intermediate data -->
+    <property>
+        <name>hadoop.tmp.dir</name>
+        <value>/tmp/hadoop</value>
+    </property>
+
+    <!-- Define the Hadoop home directory -->
+    <property>
+        <name>hadoop.home.dir</name>
+        <value>/usr/local/hadoop</value>
+    </property>
+</configuration>
\ No newline at end of file
diff --git 
a/fe/fe-core/src/test/resources/plugins/hadoop_conf/hadoop1/hdfs-site.xml 
b/fe/fe-core/src/test/resources/plugins/hadoop_conf/hadoop1/hdfs-site.xml
new file mode 100644
index 00000000000..bbca64afdb3
--- /dev/null
+++ b/fe/fe-core/src/test/resources/plugins/hadoop_conf/hadoop1/hdfs-site.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+    <!-- HDFS-specific test fixture (plugins/hadoop_conf/hadoop1) -->
+
+    <!-- Directory for storing HDFS DataNode data.
+         NOTE(review): dfs.data.dir / dfs.name.dir below are the deprecated
+         Hadoop 1.x key names (dfs.datanode.data.dir / dfs.namenode.name.dir
+         in Hadoop 2+) — presumably intentional for this fixture; confirm. -->
+    <property>
+        <name>dfs.data.dir</name>
+        <value>/data/hdfs</value>
+    </property>
+
+    <!-- Directory for storing HDFS NameNode data -->
+    <property>
+        <name>dfs.name.dir</name>
+        <value>/data/hdfs/namenode</value>
+    </property>
+
+    <!-- Replication factor for HDFS -->
+    <property>
+        <name>dfs.replication</name>
+        <value>3</value>
+    </property>
+
+    <!-- HDFS block size -->
+    <property>
+        <name>dfs.blocksize</name>
+        <value>134217728</value> <!-- 128 MB -->
+    </property>
+    <!-- Logical nameservice ID (used by the tests to detect HA-style config) -->
+    <property>
+        <name>dfs.nameservices</name>
+        <value>ns1</value>
+    </property>
+</configuration>
\ No newline at end of file
diff --git 
a/fe/fe-core/src/test/resources/plugins/hive-conf/hive1/hive-site.xml 
b/fe/fe-core/src/test/resources/plugins/hive-conf/hive1/hive-site.xml
new file mode 100644
index 00000000000..fbac5346b7f
--- /dev/null
+++ b/fe/fe-core/src/test/resources/plugins/hive-conf/hive1/hive-site.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+    <!-- Hive test fixture (plugins/hive-conf/hive1): default warehouse
+         location read by the HMS configuration-loading tests -->
+    <property>
+        <name>hive.metastore.warehouse.dir</name>
+        <value>/user/hive/default</value>
+    </property>
+</configuration> 
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to