This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch branch-4.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-4.0 by this push:
new 41f12fcedec branch-4.0: [feat](test)Add Azure Blob Test #57268 (#57457)
41f12fcedec is described below
commit 41f12fcedec251e184bdab317aa3d45306c5a392
Author: github-actions[bot]
<41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Tue Dec 2 10:26:35 2025 +0800
branch-4.0: [feat](test)Add Azure Blob Test #57268 (#57457)
Cherry-picked from #57268
Co-authored-by: Calvin Kirs <[email protected]>
---
be/src/runtime/snapshot_loader.cpp | 2 +-
.../org/apache/doris/backup/BackupHandler.java | 1 +
.../org/apache/doris/catalog/AzureResource.java | 5 +-
.../property/storage/AzurePropertyUtils.java | 23 +--
.../org/apache/doris/fs/obj/AzureObjStorage.java | 11 ++
.../azure_non_catalog_all_test.groovy | 206 +++++++++++++++++++++
.../backup_restore_azure.groovy | 63 ++++---
7 files changed, 264 insertions(+), 47 deletions(-)
diff --git a/be/src/runtime/snapshot_loader.cpp
b/be/src/runtime/snapshot_loader.cpp
index 9dbff7bbdff..d6fb88a6552 100644
--- a/be/src/runtime/snapshot_loader.cpp
+++ b/be/src/runtime/snapshot_loader.cpp
@@ -756,7 +756,7 @@ BaseSnapshotLoader::BaseSnapshotLoader(ExecEnv* env,
int64_t job_id, int64_t tas
}
Status BaseSnapshotLoader::init(TStorageBackendType::type type, const
std::string& location) {
- if (TStorageBackendType::type::S3 == type) {
+ if (TStorageBackendType::type::S3 == type ||
TStorageBackendType::type::AZURE == type) {
S3Conf s3_conf;
S3URI s3_uri(location);
RETURN_IF_ERROR(s3_uri.parse());
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java
b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java
index 9f7e3021fa0..cb1bad3d1e1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java
@@ -220,6 +220,7 @@ public class BackupHandler extends MasterDaemon implements
Writable {
ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR,
"Failed to create repository: " + st.getErrMsg());
}
+ // FIXME: why ping again? It has already pinged in addAndInitRepoIfNotExist
if (!repo.ping()) {
ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR,
"Failed to create repository: failed to connect to the
repo");
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
b/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
index ef278e6dcdf..54477da2db4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/AzureResource.java
@@ -87,8 +87,9 @@ public class AzureResource extends Resource {
Map<String, String> newProperties) throws DdlException {
Long timestamp = System.currentTimeMillis();
- String testObj = "azure://" + bucketName + "/" + rootPath
- + "/doris-test-object-valid-" + timestamp.toString() + ".txt";
+ // TODO(zyk): Azure connection test
+ String testObj = "s3://" + bucketName + "/" + rootPath
+ + "/doris-test-object-valid-" + timestamp + ".txt";
byte[] contentData = new byte[2 * ObjStorage.CHUNK_SIZE];
Arrays.fill(contentData, (byte) 'A');
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java
index f126620fba6..90152580701 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java
@@ -60,14 +60,11 @@ public class AzurePropertyUtils {
if (StringUtils.isBlank(path)) {
throw new StoragePropertiesException("Path cannot be null or
empty");
}
-
- String lower = path.toLowerCase();
-
// Only accept Azure Blob Storage-related URI schemes
- if (!(lower.startsWith("wasb://") || lower.startsWith("wasbs://")
- || lower.startsWith("abfs://") || lower.startsWith("abfss://")
- || lower.startsWith("https://") || lower.startsWith("http://")
- || lower.startsWith("s3://"))) {
+ if (!(path.startsWith("wasb://") || path.startsWith("wasbs://")
+ || path.startsWith("abfs://") || path.startsWith("abfss://")
+ || path.startsWith("https://") || path.startsWith("http://")
+ || path.startsWith("s3://"))) {
throw new StoragePropertiesException("Unsupported Azure URI
scheme: " + path);
}
if (isOneLakeLocation(path)) {
@@ -99,14 +96,12 @@ public class AzurePropertyUtils {
if (StringUtils.isBlank(uri)) {
throw new StoragePropertiesException("URI is blank");
}
-
- String lowerUri = uri.toLowerCase();
- if (lowerUri.startsWith("s3://")) {
- return lowerUri;
+ if (uri.startsWith("s3://")) {
+ return uri;
}
// Handle Azure HDFS-style URIs (wasb://, wasbs://, abfs://, abfss://)
- if (lowerUri.startsWith("wasb://") || lowerUri.startsWith("wasbs://")
- || lowerUri.startsWith("abfs://") ||
lowerUri.startsWith("abfss://")) {
+ if (uri.startsWith("wasb://") || uri.startsWith("wasbs://")
+ || uri.startsWith("abfs://") || uri.startsWith("abfss://")) {
// Example:
wasbs://[email protected]/path/file.txt
String schemeRemoved = uri.replaceFirst("^[a-z]+s?://", "");
@@ -132,7 +127,7 @@ public class AzurePropertyUtils {
}
// ② Handle HTTPS/HTTP Azure Blob Storage URLs
- if (lowerUri.startsWith("https://") || lowerUri.startsWith("http://"))
{
+ if (uri.startsWith("https://") || uri.startsWith("http://")) {
try {
URI parsed = new URI(uri);
String host = parsed.getHost();
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
index 4834253b71a..6b0c198d841 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/fs/obj/AzureObjStorage.java
@@ -23,6 +23,7 @@ import org.apache.doris.common.UserException;
import org.apache.doris.common.util.S3URI;
import org.apache.doris.common.util.S3Util;
import org.apache.doris.datasource.property.storage.AzureProperties;
+import org.apache.doris.datasource.property.storage.AzurePropertyUtils;
import org.apache.doris.fs.remote.RemoteFile;
import com.azure.core.http.rest.PagedIterable;
@@ -120,6 +121,7 @@ public class AzureObjStorage implements
ObjStorage<BlobServiceClient> {
@Override
public Status headObject(String remotePath) {
try {
+ remotePath =
AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle,
forceParsingByStandardUri);
BlobClient blobClient =
getClient().getBlobContainerClient(uri.getBucket()).getBlobClient(uri.getKey());
if (LOG.isDebugEnabled()) {
@@ -144,6 +146,7 @@ public class AzureObjStorage implements
ObjStorage<BlobServiceClient> {
@Override
public Status getObject(String remoteFilePath, File localFile) {
try {
+ remoteFilePath =
AzurePropertyUtils.validateAndNormalizeUri(remoteFilePath);
S3URI uri = S3URI.create(remoteFilePath, isUsePathStyle,
forceParsingByStandardUri);
BlobClient blobClient =
getClient().getBlobContainerClient(uri.getBucket()).getBlobClient(uri.getKey());
BlobProperties properties =
blobClient.downloadToFile(localFile.getAbsolutePath());
@@ -164,6 +167,7 @@ public class AzureObjStorage implements
ObjStorage<BlobServiceClient> {
@Override
public Status putObject(String remotePath, @Nullable InputStream content,
long contentLength) {
try {
+ remotePath =
AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle,
forceParsingByStandardUri);
BlobClient blobClient =
getClient().getBlobContainerClient(uri.getBucket()).getBlobClient(uri.getKey());
blobClient.upload(content, contentLength);
@@ -181,6 +185,7 @@ public class AzureObjStorage implements
ObjStorage<BlobServiceClient> {
@Override
public Status deleteObject(String remotePath) {
try {
+ remotePath =
AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle,
forceParsingByStandardUri);
BlobClient blobClient =
getClient().getBlobContainerClient(uri.getBucket()).getBlobClient(uri.getKey());
blobClient.delete();
@@ -204,6 +209,7 @@ public class AzureObjStorage implements
ObjStorage<BlobServiceClient> {
@Override
public Status deleteObjects(String remotePath) {
try {
+ remotePath =
AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle,
forceParsingByStandardUri);
BlobContainerClient blobClient =
getClient().getBlobContainerClient(uri.getBucket());
String containerUrl = blobClient.getBlobContainerUrl();
@@ -285,6 +291,7 @@ public class AzureObjStorage implements
ObjStorage<BlobServiceClient> {
@Override
public RemoteObjects listObjects(String remotePath, String
continuationToken) throws DdlException {
try {
+ remotePath =
AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle,
forceParsingByStandardUri);
ListBlobsOptions options = new
ListBlobsOptions().setPrefix(uri.getKey());
PagedIterable<BlobItem> pagedBlobs =
getClient().getBlobContainerClient(uri.getBucket())
@@ -320,6 +327,7 @@ public class AzureObjStorage implements
ObjStorage<BlobServiceClient> {
public Status listDirectories(String remotePath, Set<String> result) {
try {
+ remotePath =
AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle,
forceParsingByStandardUri);
String bucket = uri.getBucket();
String key = uri.getKey();
@@ -347,6 +355,7 @@ public class AzureObjStorage implements
ObjStorage<BlobServiceClient> {
long startTime = System.nanoTime();
Status st = Status.OK;
try {
+ remotePath =
AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle,
forceParsingByStandardUri);
String globPath = S3Util.extendGlobs(uri.getKey());
String bucket = uri.getBucket();
@@ -429,6 +438,7 @@ public class AzureObjStorage implements
ObjStorage<BlobServiceClient> {
public Status listFiles(String remotePath, boolean recursive,
List<RemoteFile> result) {
try {
+ remotePath =
AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle,
forceParsingByStandardUri);
String bucket = uri.getBucket();
String key = uri.getKey();
@@ -482,6 +492,7 @@ public class AzureObjStorage implements
ObjStorage<BlobServiceClient> {
try {
+ remotePath =
AzurePropertyUtils.validateAndNormalizeUri(remotePath);
S3URI uri = S3URI.create(remotePath, isUsePathStyle,
forceParsingByStandardUri);
blockBlobClient =
getClient().getBlobContainerClient(uri.getBucket())
.getBlobClient(uri.getKey()).getBlockBlobClient();
diff --git
a/regression-test/suites/external_table_p2/refactor_catalog_param/azure_non_catalog_all_test.groovy
b/regression-test/suites/external_table_p2/refactor_catalog_param/azure_non_catalog_all_test.groovy
new file mode 100644
index 00000000000..296d7aed4f9
--- /dev/null
+++
b/regression-test/suites/external_table_p2/refactor_catalog_param/azure_non_catalog_all_test.groovy
@@ -0,0 +1,206 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import org.awaitility.Awaitility;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static groovy.test.GroovyAssert.shouldFail
+
+suite("azure_non_catalog_all_test", "p2,external,new_catalog_property") {
+
+ // create internal table
+ def createDBAndTbl = { String dbName , String table->
+
+ sql """
+ drop database if exists ${dbName}
+ """
+
+ sql """
+ create database ${dbName}
+ """
+
+ sql """
+ use ${dbName}
+ """
+ sql """
+ CREATE TABLE ${table}(
+ user_id BIGINT NOT NULL COMMENT "user id",
+ name VARCHAR(20) COMMENT "name",
+ age INT COMMENT "age"
+ )
+ DUPLICATE KEY(user_id)
+ DISTRIBUTED BY HASH(user_id) BUCKETS 10
+ PROPERTIES (
+ "replication_num" = "1"
+ );
+ """
+ sql """
+ insert into ${table} values (1, 'a', 10);
+ """
+
+ def insertResult = sql """
+ SELECT count(1) FROM ${table}
+ """
+
+ println "insertResult: ${insertResult}"
+
+ assert insertResult.get(0).get(0) == 1
+ }
+ // test s3 load
+ def s3Load = { String objFilePath, String db,String table, String
objBucket, String storageProps ->
+
+ def label = "s3_load_label_" + System.currentTimeMillis()
+ sql """
+ LOAD LABEL `${label}` (
+ data infile ("${objFilePath}")
+ into table ${table}
+
+ FORMAT AS "PARQUET"
+ (
+ user_id,
+ name,
+ age
+ ))
+ with s3
+ (
+
+ ${storageProps}
+
+ )
+ PROPERTIES
+ (
+ "timeout" = "3600"
+ );
+ """
+ Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until({
+ sql """
+ use ${db}
+ """
+ def loadResult = sql """
+ show load where label = '${label}';
+ """
+ println "loadResult: ${loadResult}"
+ if(loadResult == null || loadResult.size() < 1 ) {
+ return false
+ }
+ if (loadResult.get(0).get(2) == 'CANCELLED' ||
loadResult.get(0).get(2) == 'FAILED') {
+ println("load failed: " + loadResult.get(0))
+ throw new RuntimeException("load failed"+ loadResult.get(0))
+ }
+ return loadResult.get(0).get(2) == 'FINISHED'
+ })
+
+ }
+ createDBAndTbl("azure_blob_test", "azure_blob_tbl")
+ String export_table_name = "azure_blob_tbl"
+ String export_db_name = "azure_blob_test"
+ String abfsAzureAccountName =
context.config.otherConfigs.get("abfsAccountName")
+ String abfsAzureAccountKey =
context.config.otherConfigs.get("abfsAccountKey")
+ String abfsContainer = context.config.otherConfigs.get("abfsContainer")
+ String abfsEndpoint = context.config.otherConfigs.get("abfsEndpoint")
+
+ def abfs_azure_config_props = """
+ "provider" = "azure",
+ "azure.endpoint"="${abfsEndpoint}",
+ "azure.account_name" = "${abfsAzureAccountName}",
+ "azure.account_key" = "${abfsAzureAccountKey}"
+ """
+
+ def old_abfs_azure_config_props = """
+ "provider" = "azure",
+ "s3.endpoint"="${abfsEndpoint}",
+ "s3.access_key" = "${abfsAzureAccountName}",
+ "s3.secret_key" = "${abfsAzureAccountKey}"
+ """
+ //outfile s3 only support s3://
+ def location_prefix = "s3://${abfsContainer}"
+ def full_export_path = "${location_prefix}/regression/azure/outfile/"
+ def res = sql """
+ SELECT * FROM ${export_table_name}
+ INTO OUTFILE "${full_export_path}"
+ FORMAT AS parquet
+ PROPERTIES (
+ ${abfs_azure_config_props}
+ );
+ """
+ def outfile_url = res[0][3];
+ println "outfile_url: ${outfile_url}"
+ //tvf
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${outfile_url}",
+
+ "format" = "parquet",
+ ${abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${outfile_url}",
+
+ "format" = "parquet",
+ ${old_abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ s3Load(outfile_url,export_db_name, export_table_name, abfsContainer,
abfs_azure_config_props)
+ s3Load(outfile_url, export_db_name,export_table_name, abfsContainer,
old_abfs_azure_config_props)
+ def blob_path = outfile_url.substring(location_prefix.length())
+ def s3_tvf_abfs_uri =
"abfs://${abfsContainer}@${abfsAzureAccountName}.dfs.core.windows.net/"+blob_path;
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${s3_tvf_abfs_uri}",
+
+ "format" = "parquet",
+ ${abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${s3_tvf_abfs_uri}",
+
+ "format" = "parquet",
+ ${old_abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ s3Load(s3_tvf_abfs_uri,export_db_name, export_table_name, abfsContainer,
abfs_azure_config_props)
+ s3Load(s3_tvf_abfs_uri, export_db_name,export_table_name, abfsContainer,
old_abfs_azure_config_props)
+ def se_tvf_abfss_uri =
"abfss://${abfsContainer}@${abfsAzureAccountName}.dfs.core.windows.net/"+blob_path;
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${se_tvf_abfss_uri}",
+
+ "format" = "parquet",
+ ${abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ res = sql """
+ SELECT * FROM S3 (
+ "uri" = "${se_tvf_abfss_uri}",
+
+ "format" = "parquet",
+ ${old_abfs_azure_config_props}
+ )
+ """
+ assert res.size() == 1
+ s3Load(se_tvf_abfss_uri, export_db_name,export_table_name, abfsContainer,
abfs_azure_config_props)
+ s3Load(se_tvf_abfss_uri, export_db_name,export_table_name, abfsContainer,
old_abfs_azure_config_props)
+
+
+}
\ No newline at end of file
diff --git
a/regression-test/suites/external_table_p0/refactor_storage_param/backup_restore_azure.groovy
b/regression-test/suites/external_table_p2/refactor_catalog_param/backup_restore_azure.groovy
similarity index 69%
rename from
regression-test/suites/external_table_p0/refactor_storage_param/backup_restore_azure.groovy
rename to
regression-test/suites/external_table_p2/refactor_catalog_param/backup_restore_azure.groovy
index 6c5b4547fba..0ef23e3c3bd 100644
---
a/regression-test/suites/external_table_p0/refactor_storage_param/backup_restore_azure.groovy
+++
b/regression-test/suites/external_table_p2/refactor_catalog_param/backup_restore_azure.groovy
@@ -18,18 +18,11 @@ import org.awaitility.Awaitility;
import static java.util.concurrent.TimeUnit.SECONDS;
import static groovy.test.GroovyAssert.shouldFail
-suite("refactor_storage_backup_restore_azure", "p0,external") {
+suite("refactor_storage_backup_restore_azure",
"p2,external,new_catalog_property") {
- String enabled =
context.config.otherConfigs.get("enableAzureBackupRestoreTest")
- if (enabled == null || enabled.equalsIgnoreCase("false")) {
- return ;
- }
- String objPrefix = "azure"
- String container = context.config.otherConfigs.get("azure.container")
- String account =context.config.otherConfigs.get("azure.account")
- String s3_endpoint = "${account}.blob.core.windows.net"
- String ak = context.config.otherConfigs.get("azure.ak")
- String sk = context.config.otherConfigs.get("azure.sk")
+
+
+
def s3table = "test_backup_restore_azure";
@@ -79,7 +72,7 @@ suite("refactor_storage_backup_restore_azure", "p0,external")
{
assert insertResult.get(0).get(0) == 1
}
- def createRepository = { String repoName, String endpointName, String
endpoint, String regionName, String region, String accessKeyName, String
accessKey, String secretKeyName, String secretKey, String usePathStyle, String
location ->
+ def createRepository = { String repoName, String endpointName, String
endpoint, String accessKeyName, String accessKey, String secretKeyName, String
secretKey,String location ->
try {
sql """
drop repository ${repoName};
@@ -94,11 +87,9 @@ suite("refactor_storage_backup_restore_azure",
"p0,external") {
ON LOCATION "${location}"
PROPERTIES (
"${endpointName}" = "${endpoint}",
- "${regionName}" = "${region}",
"${accessKeyName}" = "${accessKey}",
"${secretKeyName}" = "${secretKey}",
- "provider"="azure",
- "use_path_style" = "${usePathStyle}"
+ "provider"="azure"
);
"""
}
@@ -109,7 +100,7 @@ suite("refactor_storage_backup_restore_azure",
"p0,external") {
TO ${repoName}
ON (${tableName})
"""
- Awaitility.await().atMost(60, SECONDS).pollInterval(5, SECONDS).until(
+ Awaitility.await().atMost(120, SECONDS).pollInterval(5, SECONDS).until(
{
def backupResult = sql """
show backup from ${dbName} where SnapshotName =
'${backupLabel}';
@@ -159,24 +150,36 @@ suite("refactor_storage_backup_restore_azure",
"p0,external") {
return false
}
})
+ sql """
+ drop repository ${repoName};
+ """
}
-
- def s3repoName1 = "azure_repo_1"
- createRepository("${s3repoName1}", "s3.endpoint", s3_endpoint,
"s3.region", "", "s3.access_key", ak, "s3.secret_key", sk, "true",
"s3://${container}/test_" + System.currentTimeMillis())
-
- def dbName1 = currentDBName + "${objPrefix}_1"
- createDBAndTbl("${dbName1}")
- backupAndRestore("${s3repoName1}", dbName1, s3table,
"backup_${s3repoName1}_test")
- def s3repoName2 = "${objPrefix}_repo_2"
- createRepository("${s3repoName2}", "s3.endpoint", s3_endpoint,
"s3.region", "", "s3.access_key", ak, "s3.secret_key", sk, "true",
"https://${s3_endpoint}/${container}/test_" + System.currentTimeMillis())
- def dbName2 = currentDBName + "${objPrefix}_2"
+ String abfsAzureAccountName =
context.config.otherConfigs.get("abfsAccountName")
+ String abfsAzureAccountKey =
context.config.otherConfigs.get("abfsAccountKey")
+ String abfsContainer = context.config.otherConfigs.get("abfsContainer")
+ String abfsEndpoint = context.config.otherConfigs.get("abfsEndpoint")
+ def s3repoName1 = "azure_repo_1_"+System.currentTimeMillis()
+ def repoPrefix = "regression/azure/backup_restore"
+
+
+ def s3repoName2 = "azure_repo_2_"+System.currentTimeMillis()
+ createRepository("${s3repoName2}", "azure.endpoint", abfsEndpoint,
"azure.access_key", abfsAzureAccountName, "azure.secret_key",
abfsAzureAccountKey,"s3://${abfsContainer}/${repoPrefix}/test_" +
System.currentTimeMillis())
+ def dbName2 = currentDBName + "azure_2"
createDBAndTbl("${dbName2}")
backupAndRestore("${s3repoName2}", dbName2, s3table,
"backup_${s3repoName2}_test")
- String failedRepoName = "azure_failed_repo"
- shouldFail {
- createRepository("${failedRepoName}", "s3.endpoint", s3_endpoint,
"s3.region", "", "s3.access_key", ak, "s3.secret_key", sk, "false",
"https://${s3_endpoint}/${container}/test_" + System.currentTimeMillis())
- }
+
+ def s3repoName3 = "azure_repo_3_"+System.currentTimeMillis()
+ createRepository("${s3repoName3}", "azure.endpoint", abfsEndpoint,
"azure.account_name", abfsAzureAccountName, "azure.account_key",
abfsAzureAccountKey,"abfs://${abfsContainer}@${abfsAzureAccountName}.dfs.core.windows.net/${repoPrefix}/test_"
+ System.currentTimeMillis())
+ def dbName3 = currentDBName + "azure_3"
+ createDBAndTbl("${dbName3}")
+ backupAndRestore("${s3repoName3}", dbName3, s3table,
"backup_${s3repoName3}_test")
+
+ def s3repoName4 = "azure_repo_4_"+System.currentTimeMillis()
+ createRepository("${s3repoName4}", "azure.endpoint", abfsEndpoint,
"azure.account_name", abfsAzureAccountName, "azure.account_key",
abfsAzureAccountKey,"https://${abfsAzureAccountName}.blob.core.windows.net/${abfsContainer}/${repoPrefix}/test_"
+ System.currentTimeMillis())
+ def dbName4 = currentDBName + "azure_4"
+ createDBAndTbl("${dbName4}")
+ backupAndRestore("${s3repoName4}", dbName4, s3table,
"backup_${s3repoName4}_test")
}
\ No newline at end of file
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]