This is an automated email from the ASF dual-hosted git repository.
dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 3692aba99ab [test](vault) Add more regression test about storage vault
(#47449)
3692aba99ab is described below
commit 3692aba99ab37d74a31bd422a4924cd77c65c40a
Author: Lei Zhang <[email protected]>
AuthorDate: Wed Feb 12 12:05:23 2025 +0800
[test](vault) Add more regression test about storage vault (#47449)
* Add case sensitive test
* Add kerberos test
---
.../apache/doris/analysis/CreateResourceStmt.java | 11 +-
.../doris/analysis/CreateStorageVaultStmt.java | 13 +-
.../org/apache/doris/catalog/HdfsStorageVault.java | 27 +++
.../org/apache/doris/catalog/StorageVaultMgr.java | 6 +-
.../vault_p0/alter/test_alter_vault_name.groovy | 11 +
.../concurent/test_alter_vault_concurrently.groovy | 128 +++++++++++
.../test_create_vault_concurrently.groovy | 132 +++++++++++
.../test_default_vault_concurrenlty.groovy | 127 ++++++++++
.../vault_p0/create/test_create_vault.groovy | 23 ++
...> test_create_vault_with_case_sensitive.groovy} | 256 +++++++++------------
.../create/test_create_vault_with_kerberos.groovy | 106 +++++++++
...> test_vault_privilege_with_multi_roles.groovy} | 86 +++++--
.../test_vault_privilege_with_role.groovy | 78 ++++++-
.../test_vault_privilege_with_user.groovy | 8 +
14 files changed, 838 insertions(+), 174 deletions(-)
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateResourceStmt.java
b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateResourceStmt.java
index 4a358510fa0..3feccbba9ba 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateResourceStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateResourceStmt.java
@@ -30,6 +30,8 @@ import
org.apache.doris.datasource.property.constants.AzureProperties;
import org.apache.doris.mysql.privilege.PrivPredicate;
import org.apache.doris.qe.ConnectContext;
+import com.google.common.base.Strings;
+
import java.util.Map;
// CREATE [EXTERNAL] RESOURCE resource_name
@@ -69,8 +71,13 @@ public class CreateResourceStmt extends DdlStmt implements
NotFallbackInParser {
}
public void analyzeResourceType() throws UserException {
- String type = properties.get(TYPE);
- if (type == null) {
+ String type = null;
+ for (Map.Entry<String, String> property : properties.entrySet()) {
+ if (property.getKey().equalsIgnoreCase(TYPE)) {
+ type = property.getValue();
+ }
+ }
+ if (Strings.isNullOrEmpty(type)) {
throw new AnalysisException("Resource type can't be null");
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java
b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java
index f1aff6b9ab6..6e414db9314 100644
---
a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java
+++
b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java
@@ -31,6 +31,8 @@ import org.apache.doris.common.util.PrintableMap;
import org.apache.doris.mysql.privilege.PrivPredicate;
import org.apache.doris.qe.ConnectContext;
+import com.google.common.base.Strings;
+
import java.util.Map;
// CREATE STORAGE VAULT vault_name
@@ -119,10 +121,17 @@ public class CreateStorageVaultStmt extends DdlStmt
implements NotFallbackInPars
if (properties == null || properties.isEmpty()) {
throw new AnalysisException("Storage Vault properties can't be
null");
}
- String type = properties.get(TYPE);
- if (type == null) {
+
+ String type = null;
+ for (Map.Entry<String, String> property : properties.entrySet()) {
+ if (property.getKey().equalsIgnoreCase(TYPE)) {
+ type = property.getValue();
+ }
+ }
+ if (Strings.isNullOrEmpty(type)) {
throw new AnalysisException("Storage Vault type can't be null");
}
+
final String pathVersionString = properties.get(PATH_VERSION);
if (pathVersionString != null) {
this.pathVersion = Integer.parseInt(pathVersionString);
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java
b/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java
index 9be463ee3a1..03bb0fcaef6 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java
@@ -20,10 +20,14 @@ package org.apache.doris.catalog;
import org.apache.doris.cloud.proto.Cloud;
import org.apache.doris.common.DdlException;
import org.apache.doris.common.security.authentication.AuthenticationConfig;
+import org.apache.doris.datasource.property.constants.S3Properties;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import com.google.gson.annotations.SerializedName;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -31,6 +35,7 @@ import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
+import java.util.stream.Collectors;
/**
* HDFS resource
@@ -95,20 +100,42 @@ public class HdfsStorageVault extends StorageVault {
Cloud.HdfsVaultInfo.Builder hdfsVaultInfoBuilder =
Cloud.HdfsVaultInfo.newBuilder();
Cloud.HdfsBuildConf.Builder hdfsConfBuilder =
Cloud.HdfsBuildConf.newBuilder();
+
+ Set<String> lowerCaseKeys =
properties.keySet().stream().map(String::toLowerCase)
+ .collect(Collectors.toSet());
+
for (Map.Entry<String, String> property : properties.entrySet()) {
if (property.getKey().equalsIgnoreCase(HADOOP_FS_NAME)) {
+
Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
+ "%s is null or empty", property.getKey());
hdfsConfBuilder.setFsName(property.getValue());
} else if (property.getKey().equalsIgnoreCase(VAULT_PATH_PREFIX)) {
hdfsVaultInfoBuilder.setPrefix(property.getValue());
} else if
(property.getKey().equalsIgnoreCase(AuthenticationConfig.HADOOP_USER_NAME)) {
+
Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
+ "%s is null or empty", property.getKey());
hdfsConfBuilder.setUser(property.getValue());
+ } else if (property.getKey()
+
.equalsIgnoreCase(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION))
{
+
Preconditions.checkArgument(lowerCaseKeys.contains(AuthenticationConfig.HADOOP_KERBEROS_PRINCIPAL),
+ "%s is required for kerberos",
AuthenticationConfig.HADOOP_KERBEROS_PRINCIPAL);
+
Preconditions.checkArgument(lowerCaseKeys.contains(AuthenticationConfig.HADOOP_KERBEROS_KEYTAB),
+ "%s is required for kerberos",
AuthenticationConfig.HADOOP_KERBEROS_KEYTAB);
} else if
(property.getKey().equalsIgnoreCase(AuthenticationConfig.HADOOP_KERBEROS_PRINCIPAL))
{
+
Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
+ "%s is null or empty", property.getKey());
hdfsConfBuilder.setHdfsKerberosPrincipal(property.getValue());
} else if
(property.getKey().equalsIgnoreCase(AuthenticationConfig.HADOOP_KERBEROS_KEYTAB))
{
+
Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
+ "%s is null or empty", property.getKey());
hdfsConfBuilder.setHdfsKerberosKeytab(property.getValue());
} else if (property.getKey().equalsIgnoreCase(VAULT_NAME)) {
continue;
} else {
+
Preconditions.checkArgument(!property.getKey().toLowerCase().contains(S3Properties.S3_PREFIX),
+ "Invalid argument %s", property.getKey());
+
Preconditions.checkArgument(!property.getKey().toLowerCase().contains(S3Properties.PROVIDER),
+ "Invalid argument %s", property.getKey());
if
(!nonHdfsConfPropertyKeys.contains(property.getKey().toLowerCase())) {
Cloud.HdfsBuildConf.HdfsConfKVPair.Builder conf =
Cloud.HdfsBuildConf.HdfsConfKVPair.newBuilder();
conf.setKey(property.getKey());
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
index 5ad0417d7dd..e5088a7795c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
@@ -126,10 +126,10 @@ public class StorageVaultMgr {
rwLock.writeLock().lock();
String cachedVaultId = vaultNameToVaultId.get(oldVaultName);
vaultNameToVaultId.remove(oldVaultName);
- Preconditions.checkArgument(!Strings.isNullOrEmpty(cachedVaultId),
cachedVaultId,
- "Cached vault id is null or empty");
+ Preconditions.checkArgument(!Strings.isNullOrEmpty(cachedVaultId),
+ "Cached vault id %s is null or empty", cachedVaultId);
Preconditions.checkArgument(cachedVaultId.equals(vaultId),
- "Cached vault id not equal to remote storage." +
cachedVaultId + " - " + vaultId);
+ "Cached vault id not equal to remote storage. %s vs %s",
cachedVaultId, vaultId);
vaultNameToVaultId.put(newVaultName, vaultId);
} finally {
rwLock.writeLock().unlock();
diff --git a/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy
b/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy
index e094e12056d..c460c361b9c 100644
--- a/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy
+++ b/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy
@@ -101,6 +101,17 @@ suite("test_alter_vault_name", "nonConcurrent") {
}, "already existed")
// case5
+ expectExceptionLike({
+ sql """
+ ALTER STORAGE VAULT ${s3VaultName}
+ PROPERTIES (
+ "type" = "s3",
+ "VAULT_NAME" = "@#¥%*&-+=null."
+ );
+ """
+ }, "Incorrect vault name")
+
+ // case6
sql """
CREATE TABLE ${hdfsVaultName} (
C_CUSTKEY INTEGER NOT NULL,
diff --git
a/regression-test/suites/vault_p0/concurent/test_alter_vault_concurrently.groovy
b/regression-test/suites/vault_p0/concurent/test_alter_vault_concurrently.groovy
new file mode 100644
index 00000000000..b7128acab84
--- /dev/null
+++
b/regression-test/suites/vault_p0/concurent/test_alter_vault_concurrently.groovy
@@ -0,0 +1,128 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+suite("test_alter_vault_concurrently", "nonConcurrent") {
+ def suiteName = name;
+ if (!isCloudMode()) {
+ logger.info("skip ${name} case, because not cloud mode")
+ return
+ }
+
+ if (!enableStoragevault()) {
+ logger.info("skip ${name} case, because storage vault not enabled")
+ return
+ }
+
+ def randomStr = UUID.randomUUID().toString().replace("-", "")
+ def s3VaultName = "s3_" + randomStr
+
+ sql """
+ CREATE STORAGE VAULT ${s3VaultName}
+ PROPERTIES (
+ "type"="S3",
+ "s3.endpoint"="${getS3Endpoint()}",
+ "s3.region" = "${getS3Region()}",
+ "s3.access_key" = "${getS3AK()}",
+ "s3.secret_key" = "${getS3SK()}",
+ "s3.root.path" = "${s3VaultName}",
+ "s3.bucket" = "${getS3BucketName()}",
+ "s3.external_endpoint" = "",
+ "provider" = "${getS3Provider()}",
+ "use_path_style" = "false"
+ );
+ """
+
+ def future1 = thread("threadName1") {
+ try_sql """
+ ALTER STORAGE VAULT ${s3VaultName}
+ PROPERTIES (
+ "type"="S3",
+ "VAULT_NAME" = "${s3VaultName}_1"
+ );
+ """
+ }
+
+ def future2 = thread("threadName2") {
+ try_sql """
+ ALTER STORAGE VAULT ${s3VaultName}
+ PROPERTIES (
+ "type"="S3",
+ "VAULT_NAME" = "${s3VaultName}_2"
+ );
+ """
+ }
+
+ def combineFuture = combineFutures(future1, future2)
+ List<List<List<Object>>> result = combineFuture.get()
+ logger.info("${result}")
+
+ def hitNum = 0
+ def vaultsInfo = try_sql """ SHOW STORAGE VAULTS """
+ def newS3VaultName = null
+
+ for (int i = 0; i < vaultsInfo.size(); i++) {
+ def name = vaultsInfo[i][0]
+ if (name.contains(s3VaultName)) {
+ hitNum++
+ newS3VaultName = name
+ assertTrue(name.equalsIgnoreCase("${s3VaultName}_1") ||
name.equalsIgnoreCase("${s3VaultName}_2"))
+ }
+ }
+ assertEquals(hitNum, 1)
+
+ future1 = thread("threadName1") {
+ try_sql """
+ ALTER STORAGE VAULT ${newS3VaultName}
+ PROPERTIES (
+ "type"="S3",
+ "VAULT_NAME" = "${s3VaultName}_1"
+ "s3.access_key" = "error_ak_1",
+ "s3.secret_key" = "error_sk_1"
+ );
+ """
+ }
+
+ future2 = thread("threadName2") {
+ try_sql """
+ ALTER STORAGE VAULT ${newS3VaultName}
+ PROPERTIES (
+ "type"="S3",
+ "s3.access_key" = "error_ak_2",
+ "s3.secret_key" = "error_sk_2"
+ );
+ """
+ }
+
+ combineFuture = combineFutures(future1, future2)
+ result = combineFuture.get()
+ logger.info("${result}")
+
+ vaultsInfo = try_sql """ SHOW STORAGE VAULTS """
+ def found = false
+ for (int i = 0; i < vaultsInfo.size(); i++) {
+ def name = vaultsInfo[i][0]
+ if (name.contains(newS3VaultName)) {
+ logger.info("${vaultsInfo[i]}");
+ assertTrue(vaultsInfo[i][2].contains("error_ak_1") ||
vaultsInfo[i][2].contains("error_ak_2"))
+ found = true
+ }
+ }
+ assertTrue(found)
+}
diff --git
a/regression-test/suites/vault_p0/concurent/test_create_vault_concurrently.groovy
b/regression-test/suites/vault_p0/concurent/test_create_vault_concurrently.groovy
new file mode 100644
index 00000000000..985bf971e3a
--- /dev/null
+++
b/regression-test/suites/vault_p0/concurent/test_create_vault_concurrently.groovy
@@ -0,0 +1,132 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+suite("test_create_vault_concurrently", "nonConcurrent") {
+ def suiteName = name;
+ if (!isCloudMode()) {
+ logger.info("skip ${name} case, because not cloud mode")
+ return
+ }
+
+ if (!enableStoragevault()) {
+ logger.info("skip ${name} case, because storage vault not enabled")
+ return
+ }
+
+ def randomStr = UUID.randomUUID().toString().replace("-", "")
+ def s3VaultName = "s3_" + randomStr
+
+ def future1 = thread("threadName1") {
+ for (int i = 0; i < 100; i++) {
+ sql """
+ CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
+ PROPERTIES (
+ "type"="S3",
+ "s3.endpoint"="${getS3Endpoint()}",
+ "s3.region" = "${getS3Region()}",
+ "s3.access_key" = "${getS3AK()}",
+ "s3.secret_key" = "${getS3SK()}",
+ "s3.root.path" = "${s3VaultName}",
+ "s3.bucket" = "${getS3BucketName()}",
+ "s3.external_endpoint" = "",
+ "provider" = "${getS3Provider()}",
+ "use_path_style" = "false"
+ );
+ """
+ }
+ }
+
+ def future2 = thread("threadName2") {
+ for (int i = 0; i < 100; i++) {
+ sql """
+ CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
+ PROPERTIES (
+ "type"="S3",
+ "s3.endpoint"="${getS3Endpoint()}",
+ "s3.region" = "${getS3Region()}",
+ "s3.access_key" = "${getS3AK()}",
+ "s3.secret_key" = "${getS3SK()}",
+ "s3.root.path" = "${s3VaultName}",
+ "s3.bucket" = "${getS3BucketName()}",
+ "s3.external_endpoint" = "",
+ "provider" = "${getS3Provider()}",
+ "use_path_style" = "false"
+ );
+ """
+ }
+ }
+
+ def future3 = thread("threadName3") {
+ for (int i = 0; i < 100; i++) {
+ sql """
+ CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
+ PROPERTIES (
+ "type"="S3",
+ "s3.endpoint"="${getS3Endpoint()}",
+ "s3.region" = "${getS3Region()}",
+ "s3.access_key" = "${getS3AK()}",
+ "s3.secret_key" = "${getS3SK()}",
+ "s3.root.path" = "${s3VaultName}",
+ "s3.bucket" = "${getS3BucketName()}",
+ "s3.external_endpoint" = "",
+ "provider" = "${getS3Provider()}",
+ "use_path_style" = "false"
+ );
+ """
+ }
+ }
+
+ def future4 = thread("threadName4") {
+ for (int i = 0; i < 100; i++) {
+ sql """
+ CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
+ PROPERTIES (
+ "type"="S3",
+ "s3.endpoint"="${getS3Endpoint()}",
+ "s3.region" = "${getS3Region()}",
+ "s3.access_key" = "${getS3AK()}",
+ "s3.secret_key" = "${getS3SK()}",
+ "s3.root.path" = "${s3VaultName}",
+ "s3.bucket" = "${getS3BucketName()}",
+ "s3.external_endpoint" = "",
+ "provider" = "${getS3Provider()}",
+ "use_path_style" = "false"
+ );
+ """
+ }
+ }
+
+ // equals to combineFutures([future1, future2, future3, future4]), which
[] is a Iterable<ListenableFuture>
+ def combineFuture = combineFutures(future1, future2, future3, future4)
+ // or you can use lazyCheckThread action(see lazyCheck_action.groovy), and
not have to check exception from futures.
+ List<List<List<Object>>> result = combineFuture.get()
+ logger.info("${result}")
+
+ boolean s3VaultExisted = false;
+ def vaults_info = try_sql """ SHOW STORAGE VAULTS """
+
+ for (int i = 0; i < vaults_info.size(); i++) {
+ def name = vaults_info[i][0]
+ if (name.equals(s3VaultName)) {
+ s3VaultExisted = true;
+ }
+ }
+ assertTrue(s3VaultExisted)
+}
diff --git
a/regression-test/suites/vault_p0/concurent/test_default_vault_concurrenlty.groovy
b/regression-test/suites/vault_p0/concurent/test_default_vault_concurrenlty.groovy
new file mode 100644
index 00000000000..2ce094ea668
--- /dev/null
+++
b/regression-test/suites/vault_p0/concurent/test_default_vault_concurrenlty.groovy
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+suite("test_default_vault_concurrently", "nonConcurrent") {
+ def suiteName = name;
+ if (!isCloudMode()) {
+ logger.info("skip ${name} case, because not cloud mode")
+ return
+ }
+
+ if (!enableStoragevault()) {
+ logger.info("skip ${name} case, because storage vault not enabled")
+ return
+ }
+
+ def randomStr = UUID.randomUUID().toString().replace("-", "")
+ def s3VaultName1 = "s3_" + randomStr + "_1"
+ def s3VaultName2 = "s3_" + randomStr + "_2"
+
+ sql """
+ CREATE STORAGE VAULT ${s3VaultName1}
+ PROPERTIES (
+ "type"="S3",
+ "s3.endpoint"="${getS3Endpoint()}",
+ "s3.region" = "${getS3Region()}",
+ "s3.access_key" = "${getS3AK()}",
+ "s3.secret_key" = "${getS3SK()}",
+ "s3.root.path" = "${s3VaultName1}",
+ "s3.bucket" = "${getS3BucketName()}",
+ "s3.external_endpoint" = "",
+ "provider" = "${getS3Provider()}",
+ "use_path_style" = "false"
+ );
+ """
+
+ sql """
+ CREATE STORAGE VAULT ${s3VaultName2}
+ PROPERTIES (
+ "type"="S3",
+ "s3.endpoint"="${getS3Endpoint()}",
+ "s3.region" = "${getS3Region()}",
+ "s3.access_key" = "${getS3AK()}",
+ "s3.secret_key" = "${getS3SK()}",
+ "s3.root.path" = "${s3VaultName2}",
+ "s3.bucket" = "${getS3BucketName()}",
+ "s3.external_endpoint" = "",
+ "provider" = "${getS3Provider()}",
+ "use_path_style" = "false"
+ );
+ """
+
+ def future1 = thread("threadName1") {
+ for (int i = 0; i < 200; i++) {
+ sql """SET ${s3VaultName1} AS DEFAULT STORAGE VAULT;"""
+ }
+ }
+
+ def future2 = thread("threadName2") {
+ for (int i = 0; i < 200; i++) {
+ sql """SET ${s3VaultName2} AS DEFAULT STORAGE VAULT;"""
+ }
+ }
+
+ def combineFuture = combineFutures(future1, future2)
+
+ List<List<List<Object>>> result = combineFuture.get()
+ logger.info("${result}")
+
+ def vaultsInfo = try_sql """ SHOW STORAGE VAULTS """
+ def found = false
+ def defaultVaultName = null
+ for (int i = 0; i < vaultsInfo.size(); i++) {
+ def name = vaultsInfo[i][0]
+ def isDefault = vaultsInfo[i][3]
+ if (isDefault.equalsIgnoreCase("true")) {
+ assertFalse(found)
+ found = true
+ defaultVaultName = name;
+ assertTrue(name.equalsIgnoreCase(s3VaultName1) ||
name.equalsIgnoreCase(s3VaultName2))
+ }
+ }
+ assertTrue(found)
+
+ sql """
+ CREATE TABLE ${defaultVaultName} (
+ C_CUSTKEY INTEGER NOT NULL,
+ C_NAME INTEGER NOT NULL
+ )
+ DUPLICATE KEY(C_CUSTKEY, C_NAME)
+ DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+ PROPERTIES (
+ "replication_num" = "1"
+ )
+ """
+
+ future1 = thread("threadName1") {
+ for (int i = 0; i < 50; i++) {
+ sql """ insert into ${defaultVaultName} values(${i}, ${i}); """
+ }
+ }
+
+ future2 = thread("threadName2") {
+ sql """ UNSET DEFAULT STORAGE VAULT; """
+ }
+
+ combineFuture = combineFutures(future1, future2)
+
+ result = combineFuture.get()
+ logger.info("${result}")
+}
diff --git a/regression-test/suites/vault_p0/create/test_create_vault.groovy
b/regression-test/suites/vault_p0/create/test_create_vault.groovy
index 49ea2565cc6..812e3aea438 100644
--- a/regression-test/suites/vault_p0/create/test_create_vault.groovy
+++ b/regression-test/suites/vault_p0/create/test_create_vault.groovy
@@ -53,6 +53,18 @@ suite("test_create_vault", "nonConcurrent") {
"""
}, "Incorrect vault name")
+ expectExceptionLike({
+ sql """
+ CREATE STORAGE VAULT '@#¥%*&-+=null.'
+ PROPERTIES (
+ "type"="S3",
+ "fs.defaultFS"="${getHmsHdfsFs()}",
+ "path_prefix" = "${exceed64LengthStr}",
+ "hadoop.username" = "${getHmsUser()}"
+ );
+ """
+ }, "Incorrect vault name")
+
sql """
CREATE STORAGE VAULT ${length64Str}
PROPERTIES (
@@ -206,6 +218,17 @@ suite("test_create_vault", "nonConcurrent") {
"hadoop.username" = "${getHmsUser()}"
);
"""
+ }, "Invalid argument s3.bucket")
+
+ expectExceptionLike({
+ sql """
+ CREATE STORAGE VAULT ${hdfsVaultName}
+ PROPERTIES (
+ "type"="hdfs",
+ "path_prefix" = "${hdfsVaultName}",
+ "hadoop.username" = "${getHmsUser()}"
+ );
+ """
}, "invalid fs_name")
// test `if not exist` and dup name hdfs vault
diff --git a/regression-test/suites/vault_p0/create/test_create_vault.groovy
b/regression-test/suites/vault_p0/create/test_create_vault_with_case_sensitive.groovy
similarity index 53%
copy from regression-test/suites/vault_p0/create/test_create_vault.groovy
copy to
regression-test/suites/vault_p0/create/test_create_vault_with_case_sensitive.groovy
index 49ea2565cc6..0a674c9f380 100644
--- a/regression-test/suites/vault_p0/create/test_create_vault.groovy
+++
b/regression-test/suites/vault_p0/create/test_create_vault_with_case_sensitive.groovy
@@ -15,10 +15,7 @@
// specific language governing permissions and limitations
// under the License.
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-suite("test_create_vault", "nonConcurrent") {
+suite("test_create_vault_with_case_sensitive", "nonConcurrent") {
def suiteName = name;
if (!isCloudMode()) {
logger.info("skip ${name} case, because not cloud mode")
@@ -34,121 +31,106 @@ suite("test_create_vault", "nonConcurrent") {
def s3VaultName = "s3_" + randomStr
def hdfsVaultName = "hdfs_" + randomStr
- def length64Str = Stream.generate(() -> String.valueOf('a'))
- .limit(32)
- .collect(Collectors.joining()) + randomStr
-
- def exceed64LengthStr = length64Str + "a"
-
- // test long length storage vault
+ // hdfs vault case
expectExceptionLike({
sql """
- CREATE STORAGE VAULT ${exceed64LengthStr}
+ CREATE STORAGE VAULT ${hdfsVaultName}
PROPERTIES (
- "type"="S3",
+ "type" = "aaaa",
"fs.defaultFS"="${getHmsHdfsFs()}",
- "path_prefix" = "${exceed64LengthStr}",
+ "path_prefix" = "${hdfsVaultName}",
"hadoop.username" = "${getHmsUser()}"
);
- """
- }, "Incorrect vault name")
-
- sql """
- CREATE STORAGE VAULT ${length64Str}
- PROPERTIES (
- "type"="HDFS",
- "fs.defaultFS"="${getHmsHdfsFs()}",
- "path_prefix" = "${length64Str}",
- "hadoop.username" = "${getHmsUser()}"
- );
- """
+ """
+ }, "Unsupported Storage Vault type")
expectExceptionLike({
sql """
- CREATE STORAGE VAULT ${s3VaultName}
+ CREATE STORAGE VAULT ${hdfsVaultName}
PROPERTIES (
- "type"="S3",
+ "type" = "s3",
"fs.defaultFS"="${getHmsHdfsFs()}",
- "path_prefix" = "${s3VaultName}",
+ "path_prefix" = "${hdfsVaultName}",
"hadoop.username" = "${getHmsUser()}"
);
- """
+ """
}, "Missing [s3.endpoint] in properties")
-
expectExceptionLike({
sql """
- CREATE STORAGE VAULT ${s3VaultName}
+ CREATE STORAGE VAULT ${hdfsVaultName}
PROPERTIES (
- "type"="S3",
+ "type" = "S3",
"fs.defaultFS"="${getHmsHdfsFs()}",
- "path_prefix" = "${s3VaultName}",
+ "path_prefix" = "${hdfsVaultName}",
"hadoop.username" = "${getHmsUser()}"
);
- """
+ """
}, "Missing [s3.endpoint] in properties")
- expectExceptionLike({
- sql """ CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName} PROPERTIES
(); """
- }, "mismatched input ')'")
+ sql """
+ CREATE STORAGE VAULT ${hdfsVaultName}
+ PROPERTIES (
+ "type" = "hdfs",
+ "fs.defaultFS"="${getHmsHdfsFs()}",
+ "path_prefix" = "${hdfsVaultName}",
+ "hadoop.username" = "${getHmsUser()}"
+ );
+ """
+ sql """
+ CREATE STORAGE VAULT ${hdfsVaultName.toUpperCase()}
+ PROPERTIES (
+ "TYPE" = "HDFS",
+ "FS.DEFAULTFS"="${getHmsHdfsFs()}",
+ "PATH_PREFIX" = "${hdfsVaultName.toUpperCase()}",
+ "HADOOP.USERNAME" = "${getHmsUser()}"
+ );
+ """
+ // s3 vault case
expectExceptionLike({
sql """
- CREATE TABLE ${s3VaultName} (
- C_CUSTKEY INTEGER NOT NULL,
- C_NAME INTEGER NOT NULL
- )
- DUPLICATE KEY(C_CUSTKEY, C_NAME)
- DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+ CREATE STORAGE VAULT ${s3VaultName}
PROPERTIES (
- "replication_num" = "1",
- "storage_vault_name" = "not_exist_vault"
- )
+ "type" = "bbbb",
+ "s3.endpoint"="${getS3Endpoint()}",
+ "s3.region" = "${getS3Region()}",
+ "s3.access_key" = "${getS3AK()}",
+ "s3.secret_key" = "${getS3SK()}",
+ "s3.root.path" = "${s3VaultName}",
+ "s3.bucket" = "${getS3BucketName()}",
+ "s3.external_endpoint" = "",
+ "provider" = "${getS3Provider()}",
+ "use_path_style" = "false"
+ );
"""
- }, "Storage vault 'not_exist_vault' does not exist")
+ }, "Unsupported Storage Vault type")
- // test s3.root.path cannot be empty
expectExceptionLike({
sql """
CREATE STORAGE VAULT ${s3VaultName}
PROPERTIES (
- "type"="S3",
+ "type" = "hdfs",
+ "FS.DEFAULTFS"="${getHmsHdfsFs()}",
"s3.endpoint"="${getS3Endpoint()}",
"s3.region" = "${getS3Region()}",
"s3.access_key" = "${getS3AK()}",
"s3.secret_key" = "${getS3SK()}",
- "s3.root.path" = "",
+ "s3.root.path" = "${s3VaultName}",
"s3.bucket" = "${getS3BucketName()}",
"s3.external_endpoint" = "",
"provider" = "${getS3Provider()}",
"use_path_style" = "false"
);
"""
- }, "cannot be empty")
-
- // test `if not exist` and dup name s3 vault
- sql """
- CREATE STORAGE VAULT ${s3VaultName}
- PROPERTIES (
- "type"="S3",
- "s3.endpoint"="${getS3Endpoint()}",
- "s3.region" = "${getS3Region()}",
- "s3.access_key" = "${getS3AK()}",
- "s3.secret_key" = "${getS3SK()}",
- "s3.root.path" = "${s3VaultName}",
- "s3.bucket" = "${getS3BucketName()}",
- "s3.external_endpoint" = "",
- "provider" = "${getS3Provider()}",
- "use_path_style" = "false"
- );
- """
+ }, "Invalid argument s3.region")
expectExceptionLike({
sql """
CREATE STORAGE VAULT ${s3VaultName}
PROPERTIES (
- "type"="S3",
+ "type" = "HDFS",
"s3.endpoint"="${getS3Endpoint()}",
"s3.region" = "${getS3Region()}",
"s3.access_key" = "${getS3AK()}",
@@ -160,12 +142,12 @@ suite("test_create_vault", "nonConcurrent") {
"use_path_style" = "false"
);
"""
- }, "already created")
+ }, "Invalid argument s3.region")
sql """
- CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
+ CREATE STORAGE VAULT ${s3VaultName}
PROPERTIES (
- "type"="S3",
+ "type" = "s3",
"s3.endpoint"="${getS3Endpoint()}",
"s3.region" = "${getS3Region()}",
"s3.access_key" = "${getS3AK()}",
@@ -178,99 +160,69 @@ suite("test_create_vault", "nonConcurrent") {
);
"""
- sql """
- CREATE TABLE ${s3VaultName} (
- C_CUSTKEY INTEGER NOT NULL,
- C_NAME INTEGER NOT NULL
- )
- DUPLICATE KEY(C_CUSTKEY, C_NAME)
- DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
- PROPERTIES (
- "replication_num" = "1",
- "storage_vault_name" = ${s3VaultName}
- )
- """
- sql """ insert into ${s3VaultName} values(1, 1); """
- sql """ sync;"""
- def result = sql """ select * from ${s3VaultName}; """
- assertEquals(result.size(), 1);
-
- // hdfs vault case
+ // S3.xx properties is case sensitive
expectExceptionLike({
sql """
- CREATE STORAGE VAULT ${hdfsVaultName}
+ CREATE STORAGE VAULT ${s3VaultName.toUpperCase()}
PROPERTIES (
- "type"="hdfs",
- "s3.bucket"="${getHmsHdfsFs()}",
- "path_prefix" = "${hdfsVaultName}",
- "hadoop.username" = "${getHmsUser()}"
+ "TYPE" = "S3",
+ "S3.ENDPOINT"="${getS3Endpoint()}",
+ "S3.REGION" = "${getS3Region()}",
+ "S3.ACCESS_KEY" = "${getS3AK()}",
+ "S3.SECRET_KEY" = "${getS3SK()}",
+ "S3.ROOT.PATH" = "${s3VaultName}",
+ "S3.BUCKET" = "${getS3BucketName()}",
+ "S3.EXTERNAL_ENDPOINT" = "",
+ "PROVIDER" = "${getS3Provider()}",
+ "USE_PATH_STYLE" = "false"
);
- """
- }, "invalid fs_name")
+ """
+ }, "Missing [s3.endpoint] in properties")
- // test `if not exist` and dup name hdfs vault
sql """
- CREATE STORAGE VAULT ${hdfsVaultName}
+ CREATE STORAGE VAULT ${s3VaultName.toUpperCase()}
PROPERTIES (
- "type"="HDFS",
- "fs.defaultFS"="${getHmsHdfsFs()}",
- "path_prefix" = "${hdfsVaultName}",
- "hadoop.username" = "${getHmsUser()}"
+ "TYPE" = "S3",
+ "s3.endpoint"="${getS3Endpoint()}",
+ "s3.region" = "${getS3Region()}",
+ "s3.access_key" = "${getS3AK()}",
+ "s3.secret_key" = "${getS3SK()}",
+ "s3.root.path" = "${s3VaultName}",
+ "s3.bucket" = "${getS3BucketName()}",
+ "s3.external_endpoint" = "",
+ "provider" = "${getS3Provider()}",
+ "use_path_style" = "false"
);
"""
- expectExceptionLike({
- sql """
- CREATE STORAGE VAULT ${hdfsVaultName}
- PROPERTIES (
- "type"="HDFS",
- "fs.defaultFS"="${getHmsHdfsFs()}",
- "path_prefix" = "${hdfsVaultName}",
- "hadoop.username" = "${getHmsUser()}"
- );
- """
- }, "already created")
+ def vaultInfos = try_sql """SHOW STORAGE VAULTS"""
- sql """
- CREATE STORAGE VAULT IF NOT EXISTS ${hdfsVaultName}
- PROPERTIES (
- "type"="HDFS",
- "fs.defaultFS"="${getHmsHdfsFs()}",
- "path_prefix" = "${hdfsVaultName}",
- "hadoop.username" = "${getHmsUser()}"
- );
- """
+ boolean hdfsVaultLowerExist = false;
+ boolean hdfsVaultUpperExist = false;
- sql """
- CREATE TABLE ${hdfsVaultName} (
- C_CUSTKEY INTEGER NOT NULL,
- C_NAME INTEGER NOT NULL
- )
- DUPLICATE KEY(C_CUSTKEY, C_NAME)
- DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
- PROPERTIES (
- "replication_num" = "1",
- "storage_vault_name" = ${hdfsVaultName}
- )
- """
- sql """ insert into ${hdfsVaultName} values(1, 1); """
- sql """ sync;"""
- result = sql """ select * from ${hdfsVaultName}; """
- assertEquals(result.size(), 1);
+ boolean s3VaultLowerExist = false;
+ boolean s3VaultUpperExist = false;
+
+ for (int i = 0; i < vaultInfos.size(); i++) {
+ logger.info("vault info: ${vaultInfos[i]}")
+ if (vaultInfos[i][0].equals(hdfsVaultName)) {
+ hdfsVaultLowerExist = true
+ }
- boolean hdfsVaultExisted = false;
- boolean s3VaultExisted = false;
- def vaults_info = try_sql """ SHOW STORAGE VAULTS """
+ if (vaultInfos[i][0].equals(hdfsVaultName.toUpperCase())) {
+ hdfsVaultUpperExist = true
+ }
- for (int i = 0; i < vaults_info.size(); i++) {
- def name = vaults_info[i][0]
- if (name.equals(hdfsVaultName)) {
- hdfsVaultExisted = true;
+ if (vaultInfos[i][0].equals(s3VaultName)) {
+ s3VaultLowerExist = true
}
- if (name.equals(s3VaultName)) {
- s3VaultExisted = true;
+
+ if (vaultInfos[i][0].equals(s3VaultName.toUpperCase())) {
+ s3VaultUpperExist = true
}
}
- assertTrue(hdfsVaultExisted)
- assertTrue(s3VaultExisted)
-}
+ assertTrue(hdfsVaultLowerExist)
+ assertTrue(hdfsVaultUpperExist)
+ assertTrue(s3VaultLowerExist)
+ assertTrue(s3VaultUpperExist)
+}
\ No newline at end of file
diff --git
a/regression-test/suites/vault_p0/create/test_create_vault_with_kerberos.groovy
b/regression-test/suites/vault_p0/create/test_create_vault_with_kerberos.groovy
new file mode 100644
index 00000000000..d6f11f96cd7
--- /dev/null
+++
b/regression-test/suites/vault_p0/create/test_create_vault_with_kerberos.groovy
@@ -0,0 +1,106 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_create_vault_with_kerberos", "nonConcurrent") {
+ def suiteName = name;
+ if (!isCloudMode()) {
+ logger.info("skip ${name} case, because not cloud mode")
+ return
+ }
+
+ if (!enableStoragevault()) {
+ logger.info("skip ${name} case, because storage vault not enabled")
+ return
+ }
+
+ def randomStr = UUID.randomUUID().toString().replace("-", "")
+ def hdfsVaultName = "hdfs_" + randomStr
+ def hdfsVaultName2 = "hdfs2_" + randomStr
+ def tableName = "tbl_" + randomStr
+ def tableName2 = "tbl2_" + randomStr
+
+ sql """
+ CREATE STORAGE VAULT ${hdfsVaultName}
+ PROPERTIES (
+ "type" = "hdfs",
+ "fs.defaultFS"="${getHmsHdfsFs()}",
+ "path_prefix" = "${hdfsVaultName}",
+ "hadoop.username" = "not_exist_user"
+ );
+ """
+
+ sql """
+ CREATE TABLE ${tableName} (
+ C_CUSTKEY INTEGER NOT NULL,
+ C_NAME INTEGER NOT NULL
+ )
+ DUPLICATE KEY(C_CUSTKEY, C_NAME)
+ DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+ PROPERTIES (
+ "replication_num" = "1",
+ "storage_vault_name" = ${hdfsVaultName}
+ )
+ """
+
+ expectExceptionLike({
+ sql """ insert into ${tableName} values(1, 1); """
+ }, "Permission denied: user=not_exist_user")
+
+ expectExceptionLike({
+ sql """
+ CREATE STORAGE VAULT ${hdfsVaultName}_2
+ PROPERTIES (
+ "type" = "hdfs",
+ "fs.defaultFS"="${getHmsHdfsFs()}",
+ "path_prefix" = "${hdfsVaultName}_2",
+ "hadoop.username" = "${getHmsUser()}",
+ "hadoop.security.authentication" = "kerberos"
+ );
+ """
+ }, "hadoop.kerberos.principal is required for kerberos")
+
+
+ sql """
+ CREATE STORAGE VAULT ${hdfsVaultName2}
+ PROPERTIES (
+ "type" = "hdfs",
+ "fs.defaultFS"="${getHmsHdfsFs()}",
+ "path_prefix" = "${hdfsVaultName2}",
+ "hadoop.username" = "${getHmsUser()}",
+ "hadoop.security.authentication" = "kerberos",
+ "hadoop.kerberos.principal" = "hadoop/127.0.0.1@XXX",
+ "hadoop.kerberos.keytab" = "/etc/emr.keytab"
+ );
+ """
+
+ sql """
+ CREATE TABLE ${tableName2} (
+ C_CUSTKEY INTEGER NOT NULL,
+ C_NAME INTEGER NOT NULL
+ )
+ DUPLICATE KEY(C_CUSTKEY, C_NAME)
+ DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+ PROPERTIES (
+ "replication_num" = "1",
+ "storage_vault_name" = ${hdfsVaultName2}
+ )
+ """
+
+ expectExceptionLike({
+ sql """ insert into ${tableName2} values(1, 1); """
+ }, "vault id not found, maybe not sync")
+}
\ No newline at end of file
diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_multi_roles.groovy
similarity index 58%
copy from regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy
copy to regression-test/suites/vault_p0/privilege/test_vault_privilege_with_multi_roles.groovy
index 7192dc40aed..5bbeaf06c66 100644
--- a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy
+++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_multi_roles.groovy
@@ -17,7 +17,7 @@
import java.util.stream.Collectors;
-suite("test_vault_privilege_with_role", "nonConcurrent") {
+suite("test_vault_privilege_with_multi_roles", "nonConcurrent") {
def suiteName = name;
if (!isCloudMode()) {
logger.info("skip ${suiteName} case, because not cloud mode")
@@ -35,15 +35,22 @@ suite("test_vault_privilege_with_role", "nonConcurrent") {
def userName = "user_${randomStr}"
def userPassword = "Cloud12345"
- def roleName = "role_${randomStr}"
- def tableName = "tbl_${randomStr}"
+ def roleName1 = "role1_${randomStr}"
+ def roleName2 = "role2_${randomStr}"
+ def tableName1 = "tbl1_${randomStr}"
+ def tableName2 = "tbl2_${randomStr}"
+ def tableName3 = "tbl3_${randomStr}"
- sql """DROP TABLE IF EXISTS ${dbName}.${tableName}"""
+ sql """DROP TABLE IF EXISTS ${dbName}.${tableName1}"""
+ sql """DROP TABLE IF EXISTS ${dbName}.${tableName2}"""
sql """DROP USER IF EXISTS ${userName}"""
- sql """DROP ROLE IF EXISTS ${roleName}"""
+ sql """DROP ROLE IF EXISTS ${roleName1}"""
+ sql """DROP ROLE IF EXISTS ${roleName2}"""
- sql """CREATE ROLE ${roleName}"""
- sql """CREATE USER ${userName} identified by '${userPassword}' DEFAULT ROLE '${roleName}'"""
+ sql """CREATE ROLE ${roleName1}"""
+ sql """CREATE ROLE ${roleName2}"""
+
+ sql """CREATE USER ${userName} identified by '${userPassword}'"""
sql """GRANT create_priv ON *.*.* TO '${userName}'; """
sql """
@@ -59,7 +66,7 @@ suite("test_vault_privilege_with_role", "nonConcurrent") {
connect(userName, userPassword, context.config.jdbcUrl) {
expectExceptionLike({
sql """
- CREATE TABLE IF NOT EXISTS ${dbName}.${tableName} (
+ CREATE TABLE IF NOT EXISTS ${dbName}.${tableName1} (
C_CUSTKEY INTEGER NOT NULL,
C_NAME INTEGER NOT NULL
)
@@ -73,11 +80,12 @@ suite("test_vault_privilege_with_role", "nonConcurrent") {
}, "denied")
}
- sql """ GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName}' TO ROLE '${roleName}';"""
+ sql """ GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName}' TO ROLE '${roleName1}';"""
+ sql """ GRANT '${roleName1}' TO '${userName}';"""
connect(userName, userPassword, context.config.jdbcUrl) {
sql """
- CREATE TABLE IF NOT EXISTS ${dbName}.${tableName} (
+ CREATE TABLE IF NOT EXISTS ${dbName}.${tableName1} (
C_CUSTKEY INTEGER NOT NULL,
C_NAME INTEGER NOT NULL
)
@@ -94,16 +102,16 @@ suite("test_vault_privilege_with_role", "nonConcurrent") {
sql """ GRANT USAGE_PRIV ON COMPUTE GROUP '%' TO '${userName}';"""
connect(userName, userPassword, context.config.jdbcUrl) {
sql """
- insert into ${dbName}.${tableName} values(1, 1);
- select * from ${dbName}.${tableName};
+ insert into ${dbName}.${tableName1} values(1, 1);
+ select * from ${dbName}.${tableName1};
"""
}
- sql """REVOKE usage_priv ON STORAGE VAULT '${hdfsVaultName}' FROM ROLE '${roleName}';"""
+ sql """REVOKE usage_priv ON STORAGE VAULT '${hdfsVaultName}' FROM ROLE '${roleName1}';"""
connect(userName, userPassword, context.config.jdbcUrl) {
expectExceptionLike({
sql """
- CREATE TABLE ${dbName}.${tableName}_2 (
+ CREATE TABLE ${dbName}.${tableName2} (
C_CUSTKEY INTEGER NOT NULL,
C_NAME INTEGER NOT NULL
)
@@ -117,4 +125,54 @@ suite("test_vault_privilege_with_role", "nonConcurrent") {
}, "denied")
}
+ sql """ GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName}' TO ROLE '${roleName2}';"""
+ sql """ GRANT '${roleName2}' TO '${userName}';"""
+
+ connect(userName, userPassword, context.config.jdbcUrl) {
+ sql """
+ CREATE TABLE IF NOT EXISTS ${dbName}.${tableName2} (
+ C_CUSTKEY INTEGER NOT NULL,
+ C_NAME INTEGER NOT NULL
+ )
+ DUPLICATE KEY(C_CUSTKEY, C_NAME)
+ DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+ PROPERTIES (
+ "replication_num" = "1",
+ "storage_vault_name" = ${hdfsVaultName}
+ )
+ """
+ }
+
+ connect(userName, userPassword, context.config.jdbcUrl) {
+ sql """
+ insert into ${dbName}.${tableName1} values(1, 1);
+ select * from ${dbName}.${tableName1};
+ """
+ }
+
+ connect(userName, userPassword, context.config.jdbcUrl) {
+ sql """
+ insert into ${dbName}.${tableName2} values(1, 1);
+ select * from ${dbName}.${tableName2};
+ """
+ }
+
+ sql """REVOKE usage_priv ON STORAGE VAULT '${hdfsVaultName}' FROM ROLE '${roleName2}';"""
+
+ connect(userName, userPassword, context.config.jdbcUrl) {
+ expectExceptionLike({
+ sql """
+ CREATE TABLE ${dbName}.${tableName3} (
+ C_CUSTKEY INTEGER NOT NULL,
+ C_NAME INTEGER NOT NULL
+ )
+ DUPLICATE KEY(C_CUSTKEY, C_NAME)
+ DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+ PROPERTIES (
+ "replication_num" = "1",
+ "storage_vault_name" = ${hdfsVaultName}
+ )
+ """
+ }, "denied")
+ }
}
\ No newline at end of file
diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy
index 7192dc40aed..792a627c705 100644
--- a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy
+++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy
@@ -95,8 +95,9 @@ suite("test_vault_privilege_with_role", "nonConcurrent") {
connect(userName, userPassword, context.config.jdbcUrl) {
sql """
insert into ${dbName}.${tableName} values(1, 1);
- select * from ${dbName}.${tableName};
"""
+ def result = sql """ select * from ${dbName}.${tableName}; """
+ assertEquals(result.size(), 1)
}
sql """REVOKE usage_priv ON STORAGE VAULT '${hdfsVaultName}' FROM ROLE '${roleName}';"""
@@ -117,4 +118,79 @@ suite("test_vault_privilege_with_role", "nonConcurrent") {
}, "denied")
}
+ def hdfsVaultName2 = "hdfs2_" + randomStr
+ def userName2 = "user2_${randomStr}"
+ def userPassword2 = "Cloud789654"
+ def roleName2 = "role2_${randomStr}"
+ def tableName2 = "tbl2_${randomStr}"
+
+ sql """DROP TABLE IF EXISTS ${dbName}.${tableName2}"""
+ sql """DROP USER IF EXISTS ${userName2}"""
+ sql """DROP ROLE IF EXISTS ${roleName2}"""
+
+ sql """CREATE ROLE ${roleName2}"""
+ sql """CREATE USER ${userName2} identified by '${userPassword2}'"""
+ sql """GRANT create_priv ON *.*.* TO '${userName2}'; """
+ sql """GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName2}' TO '${userName2}';"""
+
+ sql """
+ CREATE STORAGE VAULT ${hdfsVaultName2}
+ PROPERTIES (
+ "type"="HDFS",
+ "fs.defaultFS"="${getHmsHdfsFs()}",
+ "path_prefix" = "${hdfsVaultName2}",
+ "hadoop.username" = "${getHmsUser()}"
+ );
+ """
+
+ sql """ GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName2}' TO ROLE '${roleName2}';"""
+
+ connect(userName2, userPassword2, context.config.jdbcUrl) {
+ sql """
+ CREATE TABLE IF NOT EXISTS ${dbName}.${tableName2} (
+ C_CUSTKEY INTEGER NOT NULL,
+ C_NAME INTEGER NOT NULL
+ )
+ DUPLICATE KEY(C_CUSTKEY, C_NAME)
+ DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+ PROPERTIES (
+ "replication_num" = "1",
+ "storage_vault_name" = ${hdfsVaultName2}
+ )
+ """
+ }
+
+ sql """ GRANT load_priv,select_priv ON *.*.* TO '${userName2}';"""
+ sql """ GRANT USAGE_PRIV ON COMPUTE GROUP '%' TO '${userName2}';"""
+ connect(userName2, userPassword2, context.config.jdbcUrl) {
+ sql """
+ insert into ${dbName}.${tableName2} values(1, 1);
+ """
+ def result = sql """ select * from ${dbName}.${tableName2}; """
+ assertEquals(result.size(), 1)
+ }
+
+ sql """REVOKE usage_priv ON STORAGE VAULT '${hdfsVaultName2}' FROM ROLE '${roleName2}';"""
+ connect(userName2, userPassword2, context.config.jdbcUrl) {
+ sql """
+ CREATE TABLE IF NOT EXISTS ${dbName}.${tableName2}_2 (
+ C_CUSTKEY INTEGER NOT NULL,
+ C_NAME INTEGER NOT NULL
+ )
+ DUPLICATE KEY(C_CUSTKEY, C_NAME)
+ DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+ PROPERTIES (
+ "replication_num" = "1",
+ "storage_vault_name" = ${hdfsVaultName2}
+ )
+ """
+ }
+
+ connect(userName2, userPassword2, context.config.jdbcUrl) {
+ sql """
+ insert into ${dbName}.${tableName2} values(2, 2);
+ """
+ def result = sql """ select * from ${dbName}.${tableName2}; """
+ assertEquals(result.size(), 2)
+ }
}
\ No newline at end of file
diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_user.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_user.groovy
index 89a158323be..e22c88c3d7b 100644
--- a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_user.groovy
+++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_user.groovy
@@ -77,6 +77,14 @@ suite("test_vault_privilege_with_user", "nonConcurrent") {
}, "denied")
}
+ connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+ expectExceptionLike({
+ sql """
+ UNSET DEFAULT STORAGE VAULT;
+ """
+ }, "denied")
+ }
+
connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
expectExceptionLike({
sql """
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]