This is an automated email from the ASF dual-hosted git repository.

jinsongzhou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/amoro.git


The following commit(s) were added to refs/heads/master by this push:
     new f88b7a6ee [AMORO-3301] Support OSS for iceberg in InternalCatalog 
(#3306)
f88b7a6ee is described below

commit f88b7a6ee9ff4a90d30934aa402632c9b0b996a4
Author: veli.yang <[email protected]>
AuthorDate: Thu Nov 21 12:19:47 2024 +0800

    [AMORO-3301] Support OSS for iceberg in InternalCatalog (#3306)
    
    * fix: support OSS for iceberg in InternalCatalog
    
    * bugfix
    
    * bugfix
    
    * fix: add aliyun-sdk-oss
    
    * fix: add restcatalog api test for oss support
    
    * fix: add license header
    
    * fix: ci
    
    * bugfix: oss-sdk adapt to iceberg version
    
    * bugfix: oss-sdk adapt to iceberg version
    
    * fix: aliyun-sdk add scope provided
    
    * fix: aliyun-oss-sdk default provided
    
    * fix: readme
    
    * fix: ci
    
    * fix: ci
    
    * fix: add readme for httpclient
    
    ---------
    
    Co-authored-by: ConradJam <[email protected]>
    Co-authored-by: ZhouJinsong <[email protected]>
---
 README.md                                          |  1 +
 .../dashboard/controller/CatalogController.java    | 85 +++++++++++++++++-----
 .../table/internal/InternalTableConstants.java     |  2 +
 .../amoro/server/utils/InternalTableUtil.java      |  4 +
 .../amoro/properties/CatalogMetaProperties.java    |  4 +-
 amoro-format-iceberg/pom.xml                       | 18 ++++-
 .../v3.2/amoro-mixed-spark-runtime-3.2/pom.xml     |  1 +
 .../v3.3/amoro-mixed-spark-3.3/pom.xml             |  2 +-
 .../v3.3/amoro-mixed-spark-runtime-3.3/pom.xml     |  1 +
 amoro-web/src/views/catalogs/Detail.vue            | 39 ++++++++--
 http/README.md                                     | 62 ++++++++++++++++
 http/RestCatalogLocalTest.http                     | 54 ++++++++++++++
 pom.xml                                            | 21 ++++++
 13 files changed, 269 insertions(+), 25 deletions(-)

diff --git a/README.md b/README.md
index d5bf8506a..49bef1fe5 100644
--- a/README.md
+++ b/README.md
@@ -119,6 +119,7 @@ Amoro is built using Maven with JDK 8 and JDK 17(only for 
`amoro-format-mixed/am
 * Build and skip tests: `mvn clean package -DskipTests`
 * Build and skip dashboard: `mvn clean package -Pskip-dashboard-build`
 * Build and disable disk storage, RocksDB will NOT be introduced to avoid 
memory overflow: `mvn clean package -DskipTests -Pno-extented-disk-storage`
+* Build and enable aliyun-oss-sdk: `mvn clean package -DskipTests 
-Paliyun-oss-sdk`
 * Build with hadoop 2.x(the default is 3.x) dependencies: `mvn clean package 
-DskipTests -Phadoop2`
 * Specify Flink version for Flink optimizer(the default is 1.20.0): `mvn clean 
package -DskipTests -Dflink-optimizer.flink-version=1.20.0`
   * If the version of Flink is below 1.15.0, you also need to add the 
`-Pflink-optimizer-pre-1.15` parameter: `mvn clean package -DskipTests 
-Pflink-optimizer-pre-1.15 -Dflink-optimizer.flink-version=1.14.6`
diff --git 
a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/CatalogController.java
 
b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/CatalogController.java
index c85111a80..6772d87c5 100644
--- 
a/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/CatalogController.java
+++ 
b/amoro-ams/src/main/java/org/apache/amoro/server/dashboard/controller/CatalogController.java
@@ -40,12 +40,14 @@ import static 
org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_HAD
 import static 
org.apache.amoro.properties.CatalogMetaProperties.CATALOG_TYPE_HIVE;
 import static org.apache.amoro.properties.CatalogMetaProperties.KEY_WAREHOUSE;
 import static 
org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_KEY_CORE_SITE;
-import static 
org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_KEY_ENDPOINT;
 import static 
org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_KEY_HDFS_SITE;
 import static 
org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_KEY_HIVE_SITE;
+import static 
org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_KEY_OSS_ENDPOINT;
 import static 
org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_KEY_REGION;
+import static 
org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_KEY_S3_ENDPOINT;
 import static 
org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_KEY_TYPE;
 import static 
org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_HADOOP;
+import static 
org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_OSS;
 import static 
org.apache.amoro.properties.CatalogMetaProperties.STORAGE_CONFIGS_VALUE_TYPE_S3;
 import static org.apache.amoro.properties.CatalogMetaProperties.TABLE_FORMATS;
 
@@ -75,6 +77,7 @@ import org.apache.amoro.table.TableProperties;
 import org.apache.amoro.utils.CatalogUtil;
 import org.apache.commons.lang.StringUtils;
 import org.apache.iceberg.CatalogProperties;
+import org.apache.iceberg.aliyun.AliyunProperties;
 import org.apache.iceberg.aws.AwsClientProperties;
 import org.apache.iceberg.aws.glue.GlueCatalog;
 import org.apache.iceberg.aws.s3.S3FileIOProperties;
@@ -114,6 +117,10 @@ public class CatalogController {
         CatalogDescriptor.of(CATALOG_TYPE_AMS, STORAGE_CONFIGS_VALUE_TYPE_S3, 
ICEBERG));
     VALIDATE_CATALOGS.add(
         CatalogDescriptor.of(CATALOG_TYPE_AMS, STORAGE_CONFIGS_VALUE_TYPE_S3, 
MIXED_ICEBERG));
+    VALIDATE_CATALOGS.add(
+        CatalogDescriptor.of(CATALOG_TYPE_AMS, STORAGE_CONFIGS_VALUE_TYPE_OSS, 
ICEBERG));
+    VALIDATE_CATALOGS.add(
+        CatalogDescriptor.of(CATALOG_TYPE_AMS, STORAGE_CONFIGS_VALUE_TYPE_OSS, 
MIXED_ICEBERG));
     VALIDATE_CATALOGS.add(
         CatalogDescriptor.of(CATALOG_TYPE_AMS, 
STORAGE_CONFIGS_VALUE_TYPE_HADOOP, ICEBERG));
     VALIDATE_CATALOGS.add(
@@ -145,6 +152,14 @@ public class CatalogController {
         CatalogDescriptor.of(CATALOG_TYPE_CUSTOM, 
STORAGE_CONFIGS_VALUE_TYPE_S3, ICEBERG));
     VALIDATE_CATALOGS.add(
         CatalogDescriptor.of(CATALOG_TYPE_CUSTOM, 
STORAGE_CONFIGS_VALUE_TYPE_HADOOP, ICEBERG));
+    VALIDATE_CATALOGS.add(
+        CatalogDescriptor.of(CATALOG_TYPE_HADOOP, 
STORAGE_CONFIGS_VALUE_TYPE_OSS, PAIMON));
+    VALIDATE_CATALOGS.add(
+        CatalogDescriptor.of(CATALOG_TYPE_GLUE, 
STORAGE_CONFIGS_VALUE_TYPE_OSS, ICEBERG));
+    VALIDATE_CATALOGS.add(
+        CatalogDescriptor.of(CATALOG_TYPE_GLUE, 
STORAGE_CONFIGS_VALUE_TYPE_OSS, MIXED_ICEBERG));
+    VALIDATE_CATALOGS.add(
+        CatalogDescriptor.of(CATALOG_TYPE_CUSTOM, 
STORAGE_CONFIGS_VALUE_TYPE_OSS, ICEBERG));
     VALIDATE_CATALOGS.add(
         CatalogDescriptor.of(
             CATALOG_TYPE_CUSTOM, STORAGE_CONFIGS_VALUE_TYPE_HADOOP, 
MIXED_ICEBERG));
@@ -182,11 +197,16 @@ public class CatalogController {
         String.valueOf(authConfig.get(AUTH_CONFIGS_KEY_TYPE)))) {
       hiddenProperties.add(S3FileIOProperties.ACCESS_KEY_ID);
       hiddenProperties.add(S3FileIOProperties.SECRET_ACCESS_KEY);
+      hiddenProperties.add(AliyunProperties.CLIENT_ACCESS_KEY_ID);
+      hiddenProperties.add(AliyunProperties.CLIENT_ACCESS_KEY_SECRET);
     }
     if 
(STORAGE_CONFIGS_VALUE_TYPE_S3.equals(storageConfig.get(STORAGE_CONFIGS_KEY_TYPE)))
 {
       hiddenProperties.add(AwsClientProperties.CLIENT_REGION);
       hiddenProperties.add(S3FileIOProperties.ENDPOINT);
     }
+    if 
(STORAGE_CONFIGS_VALUE_TYPE_OSS.equals(storageConfig.get(STORAGE_CONFIGS_KEY_TYPE)))
 {
+      hiddenProperties.add(AliyunProperties.OSS_ENDPOINT);
+    }
     return hiddenProperties;
   }
 
@@ -205,7 +225,10 @@ public class CatalogController {
   }
 
   private void fillAuthConfigs2CatalogMeta(
-      CatalogMeta catalogMeta, Map<String, String> serverAuthConfig, 
CatalogMeta oldCatalogMeta) {
+      CatalogMeta catalogMeta,
+      Map<String, String> serverAuthConfig,
+      CatalogMeta oldCatalogMeta,
+      String storageType) {
     Map<String, String> metaAuthConfig = new HashMap<>();
     String authType =
         serverAuthConfig
@@ -253,19 +276,19 @@ public class CatalogController {
             serverAuthConfig,
             catalogMeta.getCatalogProperties(),
             AUTH_CONFIGS_KEY_ACCESS_KEY,
-            S3FileIOProperties.ACCESS_KEY_ID);
+            getStorageAccessKey(storageType));
         CatalogUtil.copyProperty(
             serverAuthConfig,
             catalogMeta.getCatalogProperties(),
             AUTH_CONFIGS_KEY_SECRET_KEY,
-            S3FileIOProperties.SECRET_ACCESS_KEY);
+            getStorageSecretKey(storageType));
         break;
     }
     catalogMeta.setAuthConfigs(metaAuthConfig);
   }
 
   private Map<String, Object> extractAuthConfigsFromCatalogMeta(
-      String catalogName, CatalogMeta catalogMeta) {
+      String catalogName, CatalogMeta catalogMeta, String storageType) {
     Map<String, Object> serverAuthConfig = new HashMap<>();
     Map<String, String> metaAuthConfig = catalogMeta.getAuthConfigs();
     String authType =
@@ -298,12 +321,12 @@ public class CatalogController {
         CatalogUtil.copyProperty(
             catalogMeta.getCatalogProperties(),
             serverAuthConfig,
-            S3FileIOProperties.ACCESS_KEY_ID,
+            getStorageAccessKey(storageType),
             AUTH_CONFIGS_KEY_ACCESS_KEY);
         CatalogUtil.copyProperty(
             catalogMeta.getCatalogProperties(),
             serverAuthConfig,
-            S3FileIOProperties.SECRET_ACCESS_KEY,
+            getStorageSecretKey(storageType),
             AUTH_CONFIGS_KEY_SECRET_KEY);
         break;
     }
@@ -311,11 +334,25 @@ public class CatalogController {
     return serverAuthConfig;
   }
 
+  private String getStorageAccessKey(String storageType) {
+    if (STORAGE_CONFIGS_VALUE_TYPE_OSS.equals(storageType)) {
+      return AliyunProperties.CLIENT_ACCESS_KEY_ID;
+    }
+    // default s3
+    return S3FileIOProperties.ACCESS_KEY_ID;
+  }
+
+  private String getStorageSecretKey(String storageType) {
+    if (STORAGE_CONFIGS_VALUE_TYPE_OSS.equals(storageType)) {
+      return AliyunProperties.CLIENT_ACCESS_KEY_SECRET;
+    }
+    // default s3
+    return S3FileIOProperties.SECRET_ACCESS_KEY;
+  }
+
   private Map<String, Object> extractStorageConfigsFromCatalogMeta(
-      String catalogName, CatalogMeta catalogMeta) {
+      String catalogName, CatalogMeta catalogMeta, String storageType) {
     Map<String, Object> storageConfig = new HashMap<>();
-    Map<String, String> config = catalogMeta.getStorageConfigs();
-    String storageType = CatalogUtil.getCompatibleStorageType(config);
     storageConfig.put(STORAGE_CONFIGS_KEY_TYPE, storageType);
     if (STORAGE_CONFIGS_VALUE_TYPE_HADOOP.equals(storageType)) {
       storageConfig.put(
@@ -354,7 +391,13 @@ public class CatalogController {
           catalogMeta.getCatalogProperties(),
           storageConfig,
           S3FileIOProperties.ENDPOINT,
-          STORAGE_CONFIGS_KEY_ENDPOINT);
+          STORAGE_CONFIGS_KEY_S3_ENDPOINT);
+    } else if (STORAGE_CONFIGS_VALUE_TYPE_OSS.equals(storageType)) {
+      CatalogUtil.copyProperty(
+          catalogMeta.getCatalogProperties(),
+          storageConfig,
+          AliyunProperties.OSS_ENDPOINT,
+          STORAGE_CONFIGS_KEY_OSS_ENDPOINT);
     }
 
     return storageConfig;
@@ -387,12 +430,12 @@ public class CatalogController {
           "Invalid table format list, " + String.join(",", 
info.getTableFormatList()));
     }
     
catalogMeta.getCatalogProperties().put(CatalogMetaProperties.TABLE_FORMATS, 
tableFormats);
-    fillAuthConfigs2CatalogMeta(catalogMeta, info.getAuthConfig(), 
oldCatalogMeta);
-    // change fileId to base64Code
-    Map<String, String> metaStorageConfig = new HashMap<>();
     String storageType =
         info.getStorageConfig()
             .getOrDefault(STORAGE_CONFIGS_KEY_TYPE, 
STORAGE_CONFIGS_VALUE_TYPE_HADOOP);
+    fillAuthConfigs2CatalogMeta(catalogMeta, info.getAuthConfig(), 
oldCatalogMeta, storageType);
+    // change fileId to base64Code
+    Map<String, String> metaStorageConfig = new HashMap<>();
     metaStorageConfig.put(STORAGE_CONFIGS_KEY_TYPE, storageType);
     if (storageType.equals(STORAGE_CONFIGS_VALUE_TYPE_HADOOP)) {
       List<String> metaKeyList =
@@ -429,8 +472,14 @@ public class CatalogController {
       CatalogUtil.copyProperty(
           info.getStorageConfig(),
           catalogMeta.getCatalogProperties(),
-          STORAGE_CONFIGS_KEY_ENDPOINT,
+          STORAGE_CONFIGS_KEY_S3_ENDPOINT,
           S3FileIOProperties.ENDPOINT);
+    } else if (storageType.equals(STORAGE_CONFIGS_VALUE_TYPE_OSS)) {
+      CatalogUtil.copyProperty(
+          info.getStorageConfig(),
+          catalogMeta.getCatalogProperties(),
+          STORAGE_CONFIGS_KEY_OSS_ENDPOINT,
+          AliyunProperties.OSS_ENDPOINT);
     } else {
       throw new RuntimeException("Invalid storage type " + storageType);
     }
@@ -541,8 +590,10 @@ public class CatalogController {
       } else {
         info.setType(catalogMeta.getCatalogType());
       }
-      info.setAuthConfig(extractAuthConfigsFromCatalogMeta(catalogName, 
catalogMeta));
-      info.setStorageConfig(extractStorageConfigsFromCatalogMeta(catalogName, 
catalogMeta));
+      String storageType = 
CatalogUtil.getCompatibleStorageType(catalogMeta.getStorageConfigs());
+      info.setAuthConfig(extractAuthConfigsFromCatalogMeta(catalogName, 
catalogMeta, storageType));
+      info.setStorageConfig(
+          extractStorageConfigsFromCatalogMeta(catalogName, catalogMeta, 
storageType));
       // we put the table format single
       String tableFormat =
           
catalogMeta.getCatalogProperties().get(CatalogMetaProperties.TABLE_FORMATS);
diff --git 
a/amoro-ams/src/main/java/org/apache/amoro/server/table/internal/InternalTableConstants.java
 
b/amoro-ams/src/main/java/org/apache/amoro/server/table/internal/InternalTableConstants.java
index dcec9de4b..b2afee8be 100644
--- 
a/amoro-ams/src/main/java/org/apache/amoro/server/table/internal/InternalTableConstants.java
+++ 
b/amoro-ams/src/main/java/org/apache/amoro/server/table/internal/InternalTableConstants.java
@@ -33,6 +33,8 @@ public class InternalTableConstants {
   public static final String HADOOP_FILE_IO_IMPL = 
"org.apache.iceberg.hadoop.HadoopFileIO";
   public static final String S3_FILE_IO_IMPL = 
"org.apache.iceberg.aws.s3.S3FileIO";
   public static final String S3_PROTOCOL_PREFIX = "s3://";
+  public static final String OSS_FILE_IO_IMPL = 
"org.apache.iceberg.aliyun.oss.OSSFileIO";
+  public static final String OSS_PROTOCOL_PREFIX = "oss://";
 
   public static final String CHANGE_STORE_TABLE_NAME_SUFFIX =
       InternalMixedIcebergCatalog.CHANGE_STORE_SEPARATOR
diff --git 
a/amoro-ams/src/main/java/org/apache/amoro/server/utils/InternalTableUtil.java 
b/amoro-ams/src/main/java/org/apache/amoro/server/utils/InternalTableUtil.java
index 5d5ab39d0..87306dd56 100644
--- 
a/amoro-ams/src/main/java/org/apache/amoro/server/utils/InternalTableUtil.java
+++ 
b/amoro-ams/src/main/java/org/apache/amoro/server/utils/InternalTableUtil.java
@@ -21,6 +21,8 @@ package org.apache.amoro.server.utils;
 import static 
org.apache.amoro.server.table.internal.InternalTableConstants.HADOOP_FILE_IO_IMPL;
 import static 
org.apache.amoro.server.table.internal.InternalTableConstants.METADATA_FOLDER_NAME;
 import static 
org.apache.amoro.server.table.internal.InternalTableConstants.MIXED_ICEBERG_BASED_REST;
+import static 
org.apache.amoro.server.table.internal.InternalTableConstants.OSS_FILE_IO_IMPL;
+import static 
org.apache.amoro.server.table.internal.InternalTableConstants.OSS_PROTOCOL_PREFIX;
 import static 
org.apache.amoro.server.table.internal.InternalTableConstants.S3_FILE_IO_IMPL;
 import static 
org.apache.amoro.server.table.internal.InternalTableConstants.S3_PROTOCOL_PREFIX;
 
@@ -88,6 +90,8 @@ public class InternalTableUtil {
     String defaultImpl = HADOOP_FILE_IO_IMPL;
     if (warehouse.toLowerCase().startsWith(S3_PROTOCOL_PREFIX)) {
       defaultImpl = S3_FILE_IO_IMPL;
+    } else if (warehouse.toLowerCase().startsWith(OSS_PROTOCOL_PREFIX)) {
+      defaultImpl = OSS_FILE_IO_IMPL;
     }
     String ioImpl = 
catalogProperties.getOrDefault(CatalogProperties.FILE_IO_IMPL, defaultImpl);
     FileIO fileIO = org.apache.iceberg.CatalogUtil.loadFileIO(ioImpl, 
catalogProperties, conf);
diff --git 
a/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java
 
b/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java
index 8748190fb..e94f88961 100644
--- 
a/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java
+++ 
b/amoro-common/src/main/java/org/apache/amoro/properties/CatalogMetaProperties.java
@@ -26,11 +26,13 @@ public class CatalogMetaProperties {
   public static final String STORAGE_CONFIGS_KEY_CORE_SITE = 
"hadoop.core.site";
   public static final String STORAGE_CONFIGS_KEY_HIVE_SITE = "hive.site";
   public static final String STORAGE_CONFIGS_KEY_REGION = "storage.s3.region";
-  public static final String STORAGE_CONFIGS_KEY_ENDPOINT = 
"storage.s3.endpoint";
+  public static final String STORAGE_CONFIGS_KEY_S3_ENDPOINT = 
"storage.s3.endpoint";
+  public static final String STORAGE_CONFIGS_KEY_OSS_ENDPOINT = 
"storage.oss.endpoint";
 
   public static final String STORAGE_CONFIGS_VALUE_TYPE_HDFS_LEGACY = "hdfs";
   public static final String STORAGE_CONFIGS_VALUE_TYPE_HADOOP = "Hadoop";
   public static final String STORAGE_CONFIGS_VALUE_TYPE_S3 = "S3";
+  public static final String STORAGE_CONFIGS_VALUE_TYPE_OSS = "OSS";
 
   public static final String AUTH_CONFIGS_KEY_TYPE = "auth.type";
   public static final String AUTH_CONFIGS_KEY_PRINCIPAL = 
"auth.kerberos.principal";
diff --git a/amoro-format-iceberg/pom.xml b/amoro-format-iceberg/pom.xml
index f05f58c2f..47919fce4 100644
--- a/amoro-format-iceberg/pom.xml
+++ b/amoro-format-iceberg/pom.xml
@@ -107,6 +107,22 @@
             <artifactId>iceberg-aws</artifactId>
         </dependency>
 
+        <dependency>
+            <groupId>org.apache.iceberg</groupId>
+            <artifactId>iceberg-aliyun</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.aliyun.oss</groupId>
+            <artifactId>aliyun-sdk-oss</artifactId>
+            <exclusions>
+                <exclusion>
+                    <artifactId>httpclient</artifactId>
+                    <groupId>org.apache.httpcomponents</groupId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
         <dependency>
             <groupId>org.apache.parquet</groupId>
             <artifactId>parquet-avro</artifactId>
@@ -163,7 +179,7 @@
         <dependency>
             <groupId>org.apache.amoro</groupId>
             <artifactId>amoro-common</artifactId>
-            <version>${parent.version}</version>
+            <version>${project.parent.version}</version>
             <classifier>tests</classifier>
             <scope>test</scope>
         </dependency>
diff --git 
a/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-runtime-3.2/pom.xml
 
b/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-runtime-3.2/pom.xml
index acce293f8..f243bd1d4 100644
--- 
a/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-runtime-3.2/pom.xml
+++ 
b/amoro-format-mixed/amoro-mixed-spark/v3.2/amoro-mixed-spark-runtime-3.2/pom.xml
@@ -75,6 +75,7 @@
                                     
<include>org.apache.iceberg:iceberg-orc</include>
                                     
<include>org.apache.iceberg:iceberg-parquet</include>
                                     
<include>org.apache.iceberg:iceberg-aws</include>
+                                    
<include>org.apache.iceberg:iceberg-aliyun</include>
                                     
<include>org.apache.parquet:parquet-column</include>
                                     
<include>org.apache.parquet:parquet-hadoop</include>
                                     
<include>org.apache.parquet:parquet-common</include>
diff --git 
a/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/pom.xml 
b/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/pom.xml
index 6e54618f5..8d6cbff8b 100644
--- a/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/pom.xml
+++ b/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-3.3/pom.xml
@@ -277,7 +277,7 @@
         <dependency>
             <groupId>org.apache.amoro</groupId>
             <artifactId>amoro-format-paimon</artifactId>
-            <version>${parent.version}</version>
+            <version>${project.parent.version}</version>
             <scope>test</scope>
         </dependency>
 
diff --git 
a/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-runtime-3.3/pom.xml
 
b/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-runtime-3.3/pom.xml
index 3af0cf6de..09e898abe 100644
--- 
a/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-runtime-3.3/pom.xml
+++ 
b/amoro-format-mixed/amoro-mixed-spark/v3.3/amoro-mixed-spark-runtime-3.3/pom.xml
@@ -80,6 +80,7 @@
                                     
<include>org.apache.iceberg:iceberg-orc</include>
                                     
<include>org.apache.iceberg:iceberg-parquet</include>
                                     
<include>org.apache.iceberg:iceberg-aws</include>
+                                    
<include>org.apache.iceberg:iceberg-aliyun</include>
                                     
<include>org.apache.parquet:parquet-column</include>
                                     
<include>org.apache.parquet:parquet-hadoop</include>
                                     
<include>org.apache.parquet:parquet-common</include>
diff --git a/amoro-web/src/views/catalogs/Detail.vue 
b/amoro-web/src/views/catalogs/Detail.vue
index 9cbad7763..4f3d53042 100644
--- a/amoro-web/src/views/catalogs/Detail.vue
+++ b/amoro-web/src/views/catalogs/Detail.vue
@@ -172,6 +172,14 @@ const s3ConfigTypeOps = reactive<ILableAndValue[]>([{
   value: 'CUSTOM',
 }])
 
+const ossConfigTypeOps = reactive<ILableAndValue[]>([{
+  label: 'AK/SK',
+  value: 'AK/SK',
+}, {
+  label: 'CUSTOM',
+  value: 'CUSTOM',
+}])
+
 const storageConfigMap = {
   'hadoop.core.site': 'Hadoop core-site',
   'hadoop.hdfs.site': 'Hadoop hdfs-site',
@@ -366,9 +374,17 @@ async function changeProperties() {
   formState.properties = properties
 }
 
-const storageConfigTypeS3 = reactive<ILableAndValue[]>([{
+const storageConfigTypeS3Oss = reactive<ILableAndValue[]>([{
   label: 'S3',
   value: 'S3',
+}, {
+  label: 'OSS',
+  value: 'OSS',
+}])
+
+const storageConfigTypeOSS = reactive<ILableAndValue[]>([{
+  label: 'OSS',
+  value: 'OSS',
 }])
 
 const storageConfigTypeHadoop = reactive<ILableAndValue[]>([{
@@ -376,27 +392,30 @@ const storageConfigTypeHadoop = 
reactive<ILableAndValue[]>([{
   value: 'Hadoop',
 }])
 
-const storageConfigTypeHadoopS3 = reactive<ILableAndValue[]>([{
+const storageConfigTypeHadoopS3Oss = reactive<ILableAndValue[]>([{
   label: 'Hadoop',
   value: 'Hadoop',
 }, {
   label: 'S3',
   value: 'S3',
+}, {
+  label: 'OSS',
+  value: 'OSS',
 }])
 
 const storageConfigTypeOps = computed(() => {
   const type = formState.catalog.type
   if (type === 'ams' || type === 'custom') {
-    return storageConfigTypeHadoopS3
+    return storageConfigTypeHadoopS3Oss
   }
   else if (type === 'glue') {
-    return storageConfigTypeS3
+    return storageConfigTypeS3Oss
   }
   else if (type === 'hive') {
     return storageConfigTypeHadoop
   }
   else if (type === 'hadoop') {
-    return storageConfigTypeHadoopS3
+    return storageConfigTypeHadoopS3Oss
   }
   else {
     return null
@@ -411,6 +430,9 @@ const authTypeOptions = computed(() => {
   else if (type === 'S3') {
     return s3ConfigTypeOps
   }
+  else if (type === 'OSS') {
+    return ossConfigTypeOps
+  }
 
   return null
 })
@@ -677,6 +699,13 @@ onMounted(() => {
             <a-input v-if="isEdit" 
v-model:value="formState.storageConfig['storage.s3.region']" />
             <span v-else class="config-value">{{ 
formState.storageConfig['storage.s3.region'] }}</span>
           </a-form-item>
+          <a-form-item
+              v-if="formState.storageConfig['storage.type'] === 'OSS'" 
label="Endpoint"
+              :name="['storageConfig', 'storage.oss.endpoint']" :rules="[{ 
required: false }]"
+          >
+            <a-input v-if="isEdit" 
v-model:value="formState.storageConfig['storage.oss.endpoint']" />
+            <span v-else class="config-value">{{ 
formState.storageConfig['storage.oss.endpoint'] }}</span>
+          </a-form-item>
           <div v-if="formState.storageConfig['storage.type'] === 'Hadoop'">
             <a-form-item
               v-for="config in formState.storageConfigArray" 
:key="config.label" :label="config.label"
diff --git a/http/README.md b/http/README.md
new file mode 100644
index 000000000..3b84ffbe9
--- /dev/null
+++ b/http/README.md
@@ -0,0 +1,62 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+ -->
+
+# HTTP Client Configuration for Local Testing  
+
+This file is designed for local testing of HTTP interfaces using the **HTTP 
Client** plugin in IntelliJ IDEA. It enables quick testing of REST APIs 
directly within the IDE without requiring additional external tools or scripts. 
 
+
+## Prerequisites  
+1. Install IntelliJ IDEA (Community or Ultimate Edition).  
+2. Install the **HTTP Client** plugin if it is not already installed.  
+
+   For more information on the HTTP Client plugin, refer to the official 
documentation:  
+   [HTTP Client in IntelliJ 
IDEA](https://www.jetbrains.com/help/idea/http-client-in-product-code-editor.html)
  
+
+## How to Use  
+1. Open the `.http` or `.rest` file in IntelliJ IDEA.  
+2. Use the provided HTTP requests to test your API endpoints.  
+3. Select an HTTP request and click the **Run** button that appears next to it 
in the editor, or press `Ctrl+Enter` (Windows/Linux) or `Command+Enter` (Mac).  
+4. View the response in the dedicated response panel.  
+
+### Example  
+```http
+GET http://localhost:8080/api/example
+Content-Type: application/json
+Authorization: Bearer <your_token>
+```
+
+### Features
+
+-   Supports HTTP methods like GET, POST, PUT, DELETE, etc.
+-   Easily add headers, query parameters, and body content.
+-   View responses interactively with support for formats like JSON, XML, etc.
+
+## Scope of Use
+
+This file is intended **only for local development and testing purposes**. It 
is not meant for production or CI/CD pipelines.
+
+## Recommendations for Automated Testing
+
+While the HTTP Client plugin is great for manual testing, we recommend 
integrating REST API tests into your automated test suite for better coverage 
and reliability.
+
+### Other Suggested Tools
+
+-   **JUnit** with [RestAssured](https://rest-assured.io/) for Java-based 
integration tests.
+-   **Postman** with [Newman](https://www.npmjs.com/package/newman) for 
automated API testing.
+
diff --git a/http/RestCatalogLocalTest.http b/http/RestCatalogLocalTest.http
new file mode 100644
index 000000000..6c59ebfc6
--- /dev/null
+++ b/http/RestCatalogLocalTest.http
@@ -0,0 +1,54 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+### get catalog config
+GET http://localhost:1630/api/iceberg/rest/v1/config?warehouse=iceberg
+Content-Type: application/json
+
+### list namespace
+GET http://localhost:1630/api/iceberg/rest/v1/catalogs/iceberg/namespaces
+Content-Type: application/json
+
+### create namespace
+POST http://localhost:1630/api/iceberg/rest/v1/catalogs/iceberg/namespaces
+Content-Type: application/json
+
+{
+  "namespace": [
+    "test_db"
+  ]
+}
+
+### query namespace
+GET 
http://localhost:1630/api/iceberg/rest/v1/catalogs/iceberg/namespaces/test_db
+Content-Type: application/json
+
+### set namespace properties throws UnsupportedOperationException
+POST 
http://localhost:1630/api/iceberg/rest/v1/catalogs/iceberg/namespaces/test_db
+Content-Type: application/json
+
+### delete namespace
+DELETE 
http://localhost:1630/api/iceberg/rest/v1/catalogs/iceberg/namespaces/test_db
+Content-Type: application/json
+
+### list tables in namespace
+GET 
http://localhost:1630/api/iceberg/rest/v1/catalogs/iceberg/namespaces/sandbox/tables
+Content-Type: application/json
+
+### load table
+GET 
http://localhost:1630/api/iceberg/rest/v1/catalogs/iceberg/namespaces/sandbox/tables/test_tbl
+Content-Type: application/json
diff --git a/pom.xml b/pom.xml
index 9132f4e59..97a882b18 100644
--- a/pom.xml
+++ b/pom.xml
@@ -121,6 +121,7 @@
         <mysql-jdbc.version>8.0.33</mysql-jdbc.version>
         <orc-core.version>1.8.3</orc-core.version>
         <awssdk.version>2.24.12</awssdk.version>
+        <aliyun-sdk-oss.version>3.10.2</aliyun-sdk-oss.version>
         <terminal.spark.version>3.3.2</terminal.spark.version>
         <terminal.spark.major.version>3.3</terminal.spark.major.version>
         <dropwizard.metrics.version>4.2.19</dropwizard.metrics.version>
@@ -153,6 +154,7 @@
 
         <rocksdb-dependency-scope>compile</rocksdb-dependency-scope>
         <lucene-dependency-scope>compile</lucene-dependency-scope>
+        <aliyun-sdk-dependency-scope>provided</aliyun-sdk-dependency-scope>
     </properties>
 
     <dependencies>
@@ -350,6 +352,19 @@
                 <version>${iceberg.version}</version>
             </dependency>
 
+            <dependency>
+                <groupId>org.apache.iceberg</groupId>
+                <artifactId>iceberg-aliyun</artifactId>
+                <version>${iceberg.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>com.aliyun.oss</groupId>
+                <artifactId>aliyun-sdk-oss</artifactId>
+                <version>${aliyun-sdk-oss.version}</version>
+                <scope>${aliyun-sdk-dependency-scope}</scope>
+            </dependency>
+
             <dependency>
                 <groupId>org.apache.parquet</groupId>
                 <artifactId>parquet-avro</artifactId>
@@ -1356,5 +1371,11 @@
                 <lucene-dependency-scope>provided</lucene-dependency-scope>
             </properties>
         </profile>
+        <profile>
+            <id>aliyun-oss-sdk</id>
+            <properties>
+                
<aliyun-sdk-dependency-scope>compile</aliyun-sdk-dependency-scope>
+            </properties>
+        </profile>
     </profiles>
 </project>

Reply via email to