This is an automated email from the ASF dual-hosted git repository.
morningman pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-3.1 by this push:
new bb6b82d1e69 branch-3.1: [feat](test)Add Azure Blob Test (#57268) (#57525)
bb6b82d1e69 is described below
commit bb6b82d1e6939c311644b2b0f37807c04e22be00
Author: Calvin Kirs <[email protected]>
AuthorDate: Fri Oct 31 17:52:06 2025 +0800
branch-3.1: [feat](test)Add Azure Blob Test (#57268) (#57525)
(cherry picked from commit 7fa6aea5a909ca8b73212cd93ec7ba73a6cdbf29)
#57268
---
be/src/runtime/snapshot_loader.cpp | 2 +-
fe/be-java-extensions/paimon-scanner/pom.xml | 4 +
.../org/apache/doris/backup/BackupHandler.java | 1 +
.../org/apache/doris/catalog/AzureResource.java | 5 +-
.../property/storage/AzurePropertyUtils.java | 23 +--
.../org/apache/doris/fs/obj/AzureObjStorage.java | 11 ++
.../azure_blob_all_test.groovy | 140 ++++++++++++++
.../azure_non_catalog_all_test.groovy | 206 +++++++++++++++++++++
.../backup_restore_azure.groovy | 63 ++++---
9 files changed, 408 insertions(+), 47 deletions(-)
diff --git a/be/src/runtime/snapshot_loader.cpp b/be/src/runtime/snapshot_loader.cpp
index 6211d84e4cc..0fa95554197 100644
--- a/be/src/runtime/snapshot_loader.cpp
+++ b/be/src/runtime/snapshot_loader.cpp
@@ -743,7 +743,7 @@ BaseSnapshotLoader::BaseSnapshotLoader(ExecEnv* env, int64_t job_id, int64_t tas
: _env(env), _job_id(job_id), _task_id(task_id), _broker_addr(broker_addr), _prop(prop) {}
Status BaseSnapshotLoader::init(TStorageBackendType::type type, const std::string& location) {
- if (TStorageBackendType::type::S3 == type) {
+ if (TStorageBackendType::type::S3 == type || TStorageBackendType::type::AZURE == type) {
S3Conf s3_conf;
S3URI s3_uri(location);
RETURN_IF_ERROR(s3_uri.parse());
diff --git a/fe/be-java-extensions/paimon-scanner/pom.xml b/fe/be-java-extensions/paimon-scanner/pom.xml
index 70993a0dd41..c1b173e46a3 100644
--- a/fe/be-java-extensions/paimon-scanner/pom.xml
+++ b/fe/be-java-extensions/paimon-scanner/pom.xml
@@ -40,6 +40,10 @@ under the License.
<artifactId>java-common</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-azure</artifactId>
+ </dependency>
<dependency>
<groupId>org.apache.paimon</groupId>
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java
index c5e6b180f06..6dc051af13f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java
@@ -224,6 +224,7 @@ public class BackupHandler extends MasterDaemon implements Writable {
ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR,
"Failed to create repository: " + st.getErrMsg());
}
+ //fixme why ping again? it has pinged in addAndInitRepoIfNotExist
if (!repo.ping()) {
ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR,
"Failed to create repository: failed to connect to the
repo");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
index ef278e6dcdf..54477da2db4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
@@ -87,8 +87,9 @@ public class AzureResource extends Resource {
Map<String, String> newProperties) throws DdlException {
Long timestamp = System.currentTimeMillis();
- String testObj = "azure://" + bucketName + "/" + rootPath
- + "/doris-test-object-valid-" + timestamp.toString() + ".txt";
+ //todo @zyk Azure connection test
+ String testObj = "s3://" + bucketName + "/" + rootPath
+ + "/doris-test-object-valid-" + timestamp + ".txt";
byte[] contentData = new byte[2 * ObjStorage.CHUNK_SIZE];
Arrays.fill(contentData, (byte) 'A');
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java
index 8c986b74da0..ab57d30f529 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java
@@ -59,14 +59,11 @@ public class AzurePropertyUtils {
if (StringUtils.isBlank(path)) {
throw new StoragePropertiesException("Path cannot be null or empty");
}
-
- String lower = path.toLowerCase();
-
// Only accept Azure Blob Storage-related URI schemes
- if (!(lower.startsWith("wasb://") || lower.startsWith("wasbs://")
- || lower.startsWith("abfs://") || lower.startsWith("abfss://")
- || lower.startsWith("https://") || lower.startsWith("http://")
- || lower.startsWith("s3://"))) {
+ if (!(path.startsWith("wasb://") || path.startsWith("wasbs://")
+ || path.startsWith("abfs://") || path.startsWith("abfss://")
+ || path.startsWith("https://") || path.startsWith("http://")
+ || path.startsWith("s3://"))) {
throw new StoragePropertiesException("Unsupported Azure URI scheme: " + path);
}
@@ -92,14 +89,12 @@ public class AzurePropertyUtils {
if (StringUtils.isBlank(uri)) {
throw new StoragePropertiesException("URI is blank");
}
-
- String lowerUri = uri.toLowerCase();
- if (lowerUri.startsWith("s3://")) {
- return lowerUri;
+ if (uri.startsWith("s3://")) {
+ return uri;
}
// Handle Azure HDFS-style URIs (wasb://, wasbs://, abfs://, abfss://)
- if (lowerUri.startsWith("wasb://") || lowerUri.startsWith("wasbs://")
- || lowerUri.startsWith("abfs://") ||
lowerUri.startsWith("abfss://")) {
+ if (uri.startsWith("wasb://") || uri.startsWith("wasbs://")
+ || uri.startsWith("abfs://") || uri.startsWith("abfss://")) {
// Example: wasbs://[email protected]/path/file.txt
String schemeRemoved = uri.replaceFirst("^[a-z]+s?://", "");
@@ -125,7 +120,7 @@ public class AzurePropertyUtils {
}
// ② Handle HTTPS/HTTP Azure Blob Storage URLs
- if (lowerUri.startsWith("https://") || lowerUri.startsWith("http://"))
{
+ if (uri.startsWith("https://") || uri.startsWith("http://")) {
try {
URI parsed = new URI(uri);
String host = parsed.getHost();
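
The call sites added below in AzureObjStorage rely on this normalization: wasb://, wasbs://, abfs://, abfss:// and https:// locations are rewritten into the s3://container/key shape that S3URI already parses. A minimal standalone sketch of that idea follows; it is not the actual AzurePropertyUtils.validateAndNormalizeUri implementation, and the helper name and exact rewrite rules here are assumptions inferred from the hunk above.

    import java.net.URI;
    import java.net.URISyntaxException;

    public class AzureUriNormalizeSketch {
        // Hypothetical stand-in for AzurePropertyUtils.validateAndNormalizeUri():
        // rewrite Azure-style URIs into the s3://container/key form used elsewhere.
        static String normalize(String uri) throws URISyntaxException {
            if (uri == null || uri.isBlank()) {
                throw new IllegalArgumentException("URI is blank");
            }
            if (uri.startsWith("s3://")) {
                return uri; // already in the canonical form, pass through
            }
            if (uri.startsWith("wasb://") || uri.startsWith("wasbs://")
                    || uri.startsWith("abfs://") || uri.startsWith("abfss://")) {
                // Example (made-up account): abfss://[email protected]/dir/file.txt
                String rest = uri.replaceFirst("^[a-z]+s?://", "");
                int at = rest.indexOf('@');
                int slash = rest.indexOf('/', at);
                String container = rest.substring(0, at);
                String key = slash > 0 ? rest.substring(slash + 1) : "";
                return "s3://" + container + "/" + key;
            }
            if (uri.startsWith("https://") || uri.startsWith("http://")) {
                // Example (made-up account): https://acct.blob.core.windows.net/container/dir/file.txt
                String path = new URI(uri).getPath().replaceFirst("^/", "");
                int slash = path.indexOf('/');
                String container = slash > 0 ? path.substring(0, slash) : path;
                String key = slash > 0 ? path.substring(slash + 1) : "";
                return "s3://" + container + "/" + key;
            }
            throw new IllegalArgumentException("Unsupported Azure URI scheme: " + uri);
        }

        public static void main(String[] args) throws URISyntaxException {
            System.out.println(normalize("abfss://[email protected]/dir/file.txt"));
            System.out.println(normalize("https://acct.blob.core.windows.net/data/dir/file.txt"));
        }
    }
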
diff --git a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
index 3e014f3db3f..a8bd9624d41 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
@@ -23,6 +23,7 @@ import org.apache.doris.common.UserException;
import org.apache.doris.common.util.S3URI;
import org.apache.doris.common.util.S3Util;
import org.apache.doris.datasource.property.storage.AzureProperties;
+import org.apache.doris.datasource.property.storage.AzurePropertyUtils;
import org.apache.doris.fs.remote.RemoteFile;
import com.azure.core.http.rest.PagedIterable;
@@ -120,6 +121,7 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
@Override
public Status headObject(String remotePath) {
try {
+ remotePath = AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
BlobClient blobClient = getClient().getBlobContainerClient(uri.getBucket()).getBlobClient(uri.getKey());
if (LOG.isDebugEnabled()) {
@@ -144,6 +146,7 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
@Override
public Status getObject(String remoteFilePath, File localFile) {
try {
+ remoteFilePath = AzurePropertyUtils.validateAndNormalizeUri(remoteFilePath);
S3URI uri = S3URI.create(remoteFilePath, isUsePathStyle, forceParsingByStandardUri);
BlobClient blobClient = getClient().getBlobContainerClient(uri.getBucket()).getBlobClient(uri.getKey());
BlobProperties properties = blobClient.downloadToFile(localFile.getAbsolutePath());
@@ -164,6 +167,7 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
@Override
public Status putObject(String remotePath, @Nullable InputStream content, long contentLength) {
try {
+ remotePath = AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
BlobClient blobClient = getClient().getBlobContainerClient(uri.getBucket()).getBlobClient(uri.getKey());
blobClient.upload(content, contentLength);
@@ -181,6 +185,7 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
@Override
public Status deleteObject(String remotePath) {
try {
+ remotePath = AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
BlobClient blobClient = getClient().getBlobContainerClient(uri.getBucket()).getBlobClient(uri.getKey());
blobClient.delete();
@@ -204,6 +209,7 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
@Override
public Status deleteObjects(String remotePath) {
try {
+ remotePath = AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
BlobContainerClient blobClient = getClient().getBlobContainerClient(uri.getBucket());
String containerUrl = blobClient.getBlobContainerUrl();
@@ -285,6 +291,7 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
@Override
public RemoteObjects listObjects(String remotePath, String continuationToken) throws DdlException {
try {
+ remotePath = AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
ListBlobsOptions options = new ListBlobsOptions().setPrefix(uri.getKey());
PagedIterable<BlobItem> pagedBlobs = getClient().getBlobContainerClient(uri.getBucket())
@@ -320,6 +327,7 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
public Status listDirectories(String remotePath, Set<String> result) {
try {
+ remotePath = AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
String bucket = uri.getBucket();
String key = uri.getKey();
@@ -347,6 +355,7 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
long startTime = System.nanoTime();
Status st = Status.OK;
try {
+ remotePath = AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
String globPath = uri.getKey();
String bucket = uri.getBucket();
@@ -429,6 +438,7 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
public Status listFiles(String remotePath, boolean recursive, List<RemoteFile> result) {
try {
+ remotePath = AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
String bucket = uri.getBucket();
String key = uri.getKey();
@@ -482,6 +492,7 @@ public class AzureObjStorage implements ObjStorage<BlobServiceClient> {
try {
+ remotePath = AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle, forceParsingByStandardUri);
blockBlobClient = getClient().getBlobContainerClient(uri.getBucket())
.getBlobClient(uri.getKey()).getBlockBlobClient();
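
Each object operation above follows the same pattern: normalize the incoming location first, then let the existing S3URI parsing split it into container and key before touching the Azure SDK. Reduced to a standalone sketch (client construction is omitted, and normalizeUri() below is a placeholder for the real AzurePropertyUtils.validateAndNormalizeUri, so treat the details as assumptions):

    import com.azure.storage.blob.BlobClient;
    import com.azure.storage.blob.BlobServiceClient;

    public class AzureHeadObjectSketch {
        private final BlobServiceClient client;

        public AzureHeadObjectSketch(BlobServiceClient client) {
            this.client = client;
        }

        // Roughly what headObject() does after this change: normalize, parse, then call the SDK.
        public boolean exists(String remotePath) {
            String normalized = normalizeUri(remotePath);      // e.g. abfss://... -> s3://container/key
            String rest = normalized.substring("s3://".length());
            int slash = rest.indexOf('/');
            String container = slash < 0 ? rest : rest.substring(0, slash);
            String key = slash < 0 ? "" : rest.substring(slash + 1);
            BlobClient blob = client.getBlobContainerClient(container).getBlobClient(key);
            return blob.exists();
        }

        private String normalizeUri(String path) {
            // Placeholder only; see the AzurePropertyUtils hunk earlier in this patch.
            return path.startsWith("s3://") ? path : "s3://" + path;
        }
    }
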
diff --git a/regression-test/suites/external_table_p2/refactor_catalog_param/azure_blob_all_test.groovy b/regression-test/suites/external_table_p2/refactor_catalog_param/azure_blob_all_test.groovy
new file mode 100644
index 00000000000..03ef1c576ac
--- /dev/null
+++ b/regression-test/suites/external_table_p2/refactor_catalog_param/azure_blob_all_test.groovy
@@ -0,0 +1,140 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import static groovy.test.GroovyAssert.shouldFail;
+import java.util.concurrent.ThreadLocalRandom
+
+suite("azure_blob_all_test", "p2,external,new_catalog_property") {
+
+
+ String abfsAzureAccountName = context.config.otherConfigs.get("abfsAccountName")
+ String abfsAzureAccountKey = context.config.otherConfigs.get("abfsAccountKey")
+ String abfsContainer = context.config.otherConfigs.get("abfsContainer")
+ String abfsEndpoint = context.config.otherConfigs.get("abfsEndpoint")
+ def abfs_azure_config_props = """
+ "provider" = "azure",
+ "azure.endpoint"="${abfsEndpoint}",
+ "azure.account_name" = "${abfsAzureAccountName}",
+ "azure.account_key" = "${abfsAzureAccountKey}"
+ """
+
+ // Iceberg FS
+
+ def testIcebergTest = { String storage_props,String iceberg_fs_catalog_name, String protocol,String hdfsLocationType ->
+
+ sql """
+ drop catalog if exists ${iceberg_fs_catalog_name};
+ """
+ sql"""
+ create catalog ${iceberg_fs_catalog_name} properties(
+ 'type'='iceberg',
+ 'iceberg.catalog.type'='hadoop',
+ 'warehouse'='${protocol}://${abfsContainer}@${abfsAzureAccountName}.${hdfsLocationType}.core.windows.net/regression/external/azure/${protocol}/iceberg_fs_warehouse/',
+ ${storage_props}
+ );
+ """
+
+ sql """
+ switch ${iceberg_fs_catalog_name}
+ """
+
+ sql """
+ drop database if exists ${iceberg_fs_catalog_name}_db_test;
+ """
+ sql """
+ create database ${iceberg_fs_catalog_name}_db_test;
+ """
+ sql """
+ use ${iceberg_fs_catalog_name}_db_test;
+ """
+ sql """
+ create table ${iceberg_fs_catalog_name}_table_test (id int, name string)
+ """
+ sql """
+ insert into ${iceberg_fs_catalog_name}_table_test values(1, 'iceberg_fs_abfs_test');
+ """
+ def query_result = sql """
+ select count(1) from ${iceberg_fs_catalog_name}_table_test;
+ """
+
+ assert query_result[0][0] == 1
+
+ sql """
+ drop table if exists ${iceberg_fs_catalog_name}_table_test;
+ """
+ sql """
+ drop database if exists ${iceberg_fs_catalog_name}_db_test;
+ """
+ sql """
+ drop catalog if exists ${iceberg_fs_catalog_name};
+ """
+ }
+
+
+ //abfs
+ testIcebergTest(abfs_azure_config_props, "iceberg_fs_abfs_catalog", "abfs","dfs")
+ testIcebergTest(abfs_azure_config_props, "iceberg_fs_abfss_catalog", "abfss","dfs")
+
+
+
+ //abfss
+ def testPaimonTest = { String storage_props,String paimon_catalog_name, String protocol,String hdfsLocationType,String queryTbl ->
+ sql """
+ drop catalog if exists ${paimon_catalog_name};
+ """
+ sql"""
+ create catalog ${paimon_catalog_name} properties(
+ 'type'='paimon',
+ 'paimon.catalog.type'='filesystem',
+ 'warehouse'='${protocol}://${abfsContainer}@${abfsAzureAccountName}.${hdfsLocationType}.core.windows.net/regression/azure/${protocol}/paimon_fs_warehouse/',
+ ${abfs_azure_config_props}
+ );
+ """
+
+ sql """
+ switch ${paimon_catalog_name}
+ """
+
+ def query_result =sql """
+ select * from ${paimon_catalog_name}.${queryTbl}
+ """
+ println query_result
+
+ sql """
+ drop catalog if exists ${paimon_catalog_name};
+ """
+ }
+
+ // Paimon FS
+ sql """
+ set force_jni_scanner=false;
+ """
+
+ def paimon_fs_abfss_db_tbl = "paimon_fs_abfss_test_db.external_test_table"
+ def paimon_fs_abfs_db_tbl = "paimon_fs_abfs_test_db.external_test_table"
+ testPaimonTest(abfs_azure_config_props, "paimon_fs_abfs_catalog", "abfs","dfs",paimon_fs_abfs_db_tbl)
+ testPaimonTest(abfs_azure_config_props, "paimon_fs_abfss_catalog", "abfss","dfs",paimon_fs_abfss_db_tbl)
+
+ // TODO: Enable this once BE's HDFS dependency management is fully ready.
+ // This module requires higher-version JARs to support JDK 17 access.
+ /* sql """
+ set force_jni_scanner=true;
+ """
+ testPaimonTest(abfs_azure_config_props, "paimon_fs_abfs_catalog", "abfs","dfs",paimon_fs_abfs_db_tbl)
+ testPaimonTest(abfs_azure_config_props, "paimon_fs_abfss_catalog", "abfss","dfs",paimon_fs_abfss_db_tbl)*/
+
+
+}
\ No newline at end of file
diff --git a/regression-test/suites/external_table_p2/refactor_catalog_param/azure_non_catalog_all_test.groovy b/regression-test/suites/external_table_p2/refactor_catalog_param/azure_non_catalog_all_test.groovy
new file mode 100644
index 00000000000..296d7aed4f9
--- /dev/null
+++ b/regression-test/suites/external_table_p2/refactor_catalog_param/azure_non_catalog_all_test.groovy
@@ -0,0 +1,206 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import org.awaitility.Awaitility;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static groovy.test.GroovyAssert.shouldFail
+
+suite("azure_non_catalog_all_test", "p2,external,new_catalog_property") {
+
+ // create internal table
+ def createDBAndTbl = { String dbName , String table->
+
+ sql """
+ drop database if exists ${dbName}
+ """
+
+ sql """
+ create database ${dbName}
+ """
+
+ sql """
+ use ${dbName}
+ """
+ sql """
+ CREATE TABLE ${table}(
+ user_id BIGINT NOT NULL COMMENT "user id",
+ name VARCHAR(20) COMMENT "name",
+ age INT COMMENT "age"
+ )
+ DUPLICATE KEY(user_id)
+ DISTRIBUTED BY HASH(user_id) BUCKETS 10
+ PROPERTIES (
+ "replication_num" = "1"
+ );
+ """
+ sql """
+ insert into ${table} values (1, 'a', 10);
+ """
+
+ def insertResult = sql """
+ SELECT count(1) FROM ${table}
+ """
+
+ println "insertResult: ${insertResult}"
+
+ assert insertResult.get(0).get(0) == 1
+ }
+ // test s3 load
+ def s3Load = { String objFilePath, String db,String table, String objBucket, String storageProps ->
+
+ def label = "s3_load_label_" + System.currentTimeMillis()
+ sql """
+ LOAD LABEL `${label}` (
+ data infile ("${objFilePath}")
+ into table ${table}
+
+ FORMAT AS "PARQUET"
+ (
+ user_id,
+ name,
+ age
+ ))
+ with s3
+ (
+
+ ${storageProps}
+
+ )
+ PROPERTIES
+ (
+ "timeout" = "3600"
+ );
+ """
+ Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until({
+ sql """
+ use ${db}
+ """
+ def loadResult = sql """
+ show load where label = '${label}';
+ """
+ println "loadResult: ${loadResult}"
+ if(loadResult == null || loadResult.size() < 1 ) {
+ return false
+ }
+ if (loadResult.get(0).get(2) == 'CANCELLED' || loadResult.get(0).get(2) == 'FAILED') {
+ println("load failed: " + loadResult.get(0))
+ throw new RuntimeException("load failed"+ loadResult.get(0))
+ }
+ return loadResult.get(0).get(2) == 'FINISHED'
+ })
+
+ }
+ createDBAndTbl("azure_blob_test", "azure_blob_tbl")
+ String export_table_name = "azure_blob_tbl"
+ String export_db_name = "azure_blob_test"
+ String abfsAzureAccountName = context.config.otherConfigs.get("abfsAccountName")
+ String abfsAzureAccountKey = context.config.otherConfigs.get("abfsAccountKey")
+ String abfsContainer = context.config.otherConfigs.get("abfsContainer")
+ String abfsEndpoint = context.config.otherConfigs.get("abfsEndpoint")
+
+ def abfs_azure_config_props = """
+ "provider" = "azure",
+ "azure.endpoint"="${abfsEndpoint}",
+ "azure.account_name" = "${abfsAzureAccountName}",
+ "azure.account_key" = "${abfsAzureAccountKey}"
+ """
+
+ def old_abfs_azure_config_props = """
+ "provider" = "azure",
+ "s3.endpoint"="${abfsEndpoint}",
+ "s3.access_key" = "${abfsAzureAccountName}",
+ "s3.secret_key" = "${abfsAzureAccountKey}"
+ """
+ //outfile s3 only support s3://
+ def location_prefix = "s3://${abfsContainer}"
+ def full_export_path = "${location_prefix}/regression/azure/outfile/"
+ def res = sql """
+ SELECT * FROM ${export_table_name}
+ INTO OUTFILE "${full_export_path}"
+ FORMAT AS parquet
+ PROPERTIES (
+ ${abfs_azure_config_props}
+ );
+ """
+ def outfile_url = res[0][3];
+ println "outfile_url: ${outfile_url}"
+ //tvf
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${outfile_url}",
+
+ "format" = "parquet",
+ ${abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${outfile_url}",
+
+ "format" = "parquet",
+ ${old_abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ s3Load(outfile_url,export_db_name, export_table_name, abfsContainer, abfs_azure_config_props)
+ s3Load(outfile_url, export_db_name,export_table_name, abfsContainer, old_abfs_azure_config_props)
+ def blob_path = outfile_url.substring(location_prefix.length())
+ def s3_tvf_abfs_uri = "abfs://${abfsContainer}@${abfsAzureAccountName}.dfs.core.windows.net/"+blob_path;
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${s3_tvf_abfs_uri}",
+
+ "format" = "parquet",
+ ${abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${s3_tvf_abfs_uri}",
+
+ "format" = "parquet",
+ ${old_abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ s3Load(s3_tvf_abfs_uri,export_db_name, export_table_name, abfsContainer, abfs_azure_config_props)
+ s3Load(s3_tvf_abfs_uri, export_db_name,export_table_name, abfsContainer, old_abfs_azure_config_props)
+ def se_tvf_abfss_uri = "abfss://${abfsContainer}@${abfsAzureAccountName}.dfs.core.windows.net/"+blob_path;
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${se_tvf_abfss_uri}",
+
+ "format" = "parquet",
+ ${abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${se_tvf_abfss_uri}",
+
+ "format" = "parquet",
+ ${old_abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ s3Load(se_tvf_abfss_uri, export_db_name,export_table_name, abfsContainer, abfs_azure_config_props)
+ s3Load(se_tvf_abfss_uri, export_db_name,export_table_name, abfsContainer, old_abfs_azure_config_props)
+
+
+}
\ No newline at end of file
diff --git a/regression-test/suites/external_table_p0/refactor_storage_param/backup_restore_azure.groovy b/regression-test/suites/external_table_p2/refactor_catalog_param/backup_restore_azure.groovy
similarity index 69%
rename from regression-test/suites/external_table_p0/refactor_storage_param/backup_restore_azure.groovy
rename to regression-test/suites/external_table_p2/refactor_catalog_param/backup_restore_azure.groovy
index 6c5b4547fba..0ef23e3c3bd 100644
--- a/regression-test/suites/external_table_p0/refactor_storage_param/backup_restore_azure.groovy
+++ b/regression-test/suites/external_table_p2/refactor_catalog_param/backup_restore_azure.groovy
@@ -18,18 +18,11 @@ import org.awaitility.Awaitility;
import static java.util.concurrent.TimeUnit.SECONDS;
import static groovy.test.GroovyAssert.shouldFail
-suite("refactor_storage_backup_restore_azure", "p0,external") {
+suite("refactor_storage_backup_restore_azure",
"p2,external,new_catalog_property") {
- String enabled =
context.config.otherConfigs.get("enableAzureBackupRestoreTest")
- if (enabled == null || enabled.equalsIgnoreCase("false")) {
- return ;
- }
- String objPrefix = "azure"
- String container = context.config.otherConfigs.get("azure.container")
- String account =context.config.otherConfigs.get("azure.account")
- String s3_endpoint = "${account}.blob.core.windows.net"
- String ak = context.config.otherConfigs.get("azure.ak")
- String sk = context.config.otherConfigs.get("azure.sk")
+
+
+
def s3table = "test_backup_restore_azure";
@@ -79,7 +72,7 @@ suite("refactor_storage_backup_restore_azure", "p0,external") {
assert insertResult.get(0).get(0) == 1
}
- def createRepository = { String repoName, String endpointName, String endpoint, String regionName, String region, String accessKeyName, String accessKey, String secretKeyName, String secretKey, String usePathStyle, String location ->
+ def createRepository = { String repoName, String endpointName, String endpoint, String accessKeyName, String accessKey, String secretKeyName, String secretKey,String location ->
try {
sql """
drop repository ${repoName};
@@ -94,11 +87,9 @@ suite("refactor_storage_backup_restore_azure", "p0,external") {
ON LOCATION "${location}"
PROPERTIES (
"${endpointName}" = "${endpoint}",
- "${regionName}" = "${region}",
"${accessKeyName}" = "${accessKey}",
"${secretKeyName}" = "${secretKey}",
- "provider"="azure",
- "use_path_style" = "${usePathStyle}"
+ "provider"="azure"
);
"""
}
@@ -109,7 +100,7 @@ suite("refactor_storage_backup_restore_azure", "p0,external") {
TO ${repoName}
ON (${tableName})
"""
- Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until(
+ Awaitility.await().atMost(120, SECONDS).pollInterval(5, SECONDS).until(
{
def backupResult = sql """
show backup from ${dbName} where SnapshotName = '${backupLabel}';
@@ -159,24 +150,36 @@ suite("refactor_storage_backup_restore_azure", "p0,external") {
return false
}
})
+ sql """
+ drop repository ${repoName};
+ """
}
-
- def s3repoName1 = "azure_repo_1"
- createRepository("${s3repoName1}", "s3.endpoint", s3_endpoint,
"s3.region", "", "s3.access_key", ak, "s3.secret_key", sk, "true",
"s3://${container}/test_" + System.currentTimeMillis())
-
- def dbName1 = currentDBName + "${objPrefix}_1"
- createDBAndTbl("${dbName1}")
- backupAndRestore("${s3repoName1}", dbName1, s3table,
"backup_${s3repoName1}_test")
- def s3repoName2 = "${objPrefix}_repo_2"
- createRepository("${s3repoName2}", "s3.endpoint", s3_endpoint,
"s3.region", "", "s3.access_key", ak, "s3.secret_key", sk, "true",
"https://${s3_endpoint}/${container}/test_" + System.currentTimeMillis())
- def dbName2 = currentDBName + "${objPrefix}_2"
+ String abfsAzureAccountName = context.config.otherConfigs.get("abfsAccountName")
+ String abfsAzureAccountKey = context.config.otherConfigs.get("abfsAccountKey")
+ String abfsContainer = context.config.otherConfigs.get("abfsContainer")
+ String abfsEndpoint = context.config.otherConfigs.get("abfsEndpoint")
+ def s3repoName1 = "azure_repo_1_"+System.currentTimeMillis()
+ def repoPrefix = "regression/azure/backup_restore"
+
+
+ def s3repoName2 = "azure_repo_2_"+System.currentTimeMillis()
+ createRepository("${s3repoName2}", "azure.endpoint", abfsEndpoint,
"azure.access_key", abfsAzureAccountName, "azure.secret_key",
abfsAzureAccountKey,"s3://${abfsContainer}/${repoPrefix}/test_" +
System.currentTimeMillis())
+ def dbName2 = currentDBName + "azure_2"
createDBAndTbl("${dbName2}")
backupAndRestore("${s3repoName2}", dbName2, s3table,
"backup_${s3repoName2}_test")
- String failedRepoName = "azure_failed_repo"
- shouldFail {
- createRepository("${failedRepoName}", "s3.endpoint", s3_endpoint,
"s3.region", "", "s3.access_key", ak, "s3.secret_key", sk, "false",
"https://${s3_endpoint}/${container}/test_" + System.currentTimeMillis())
- }
+
+ def s3repoName3 = "azure_repo_3_"+System.currentTimeMillis()
+ createRepository("${s3repoName3}", "azure.endpoint", abfsEndpoint,
"azure.account_name", abfsAzureAccountName, "azure.account_key",
abfsAzureAccountKey,"abfs://${abfsContainer}@${abfsAzureAccountName}.dfs.core.windows.net/${repoPrefix}/test_"
+ System.currentTimeMillis())
+ def dbName3 = currentDBName + "azure_3"
+ createDBAndTbl("${dbName3}")
+ backupAndRestore("${s3repoName3}", dbName3, s3table,
"backup_${s3repoName3}_test")
+
+ def s3repoName4 = "azure_repo_4_"+System.currentTimeMillis()
+ createRepository("${s3repoName4}", "azure.endpoint", abfsEndpoint,
"azure.account_name", abfsAzureAccountName, "azure.account_key",
abfsAzureAccountKey,"https://${abfsAzureAccountName}.blob.core.windows.net/${abfsContainer}/${repoPrefix}/test_"
+ System.currentTimeMillis())
+ def dbName4 = currentDBName + "azure_4"
+ createDBAndTbl("${dbName4}")
+ backupAndRestore("${s3repoName4}", dbName4, s3table,
"backup_${s3repoName4}_test")
}
\ No newline at end of file
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]