This is an automated email from the ASF dual-hosted git repository.
dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 65becbe54ef [fix](cloud) Fix cloud create table need cluster auth and no cluster … (#31772)
65becbe54ef is described below
commit 65becbe54ef8b0e47782d85ef4bb8cb81fdbbc25
Author: deardeng <[email protected]>
AuthorDate: Fri Mar 8 18:52:21 2024 +0800
[fix](cloud) Fix cloud create table need cluster auth and no cluster … (#31772)
* [fix](cloud) Fix cloud create table needing cluster auth, and the NPE thrown by select when no cluster auth is granted
```
2024-03-04 15:24:22,555 DEBUG (mysql-nio-pool-9|574) [Role.checkCloudPriv():411] failed to get wanted privs: priv predicate: OR, Admin_priv Usage_priv Cluster_Usage_priv , granted:
2024-03-04 15:24:22,555 WARN (mysql-nio-pool-9|574) [ConnectContext.getCloudCluster():1094] cant get a valid cluster for user 'nereids_user'@'%' to use
2024-03-04 15:24:22,555 DEBUG (mysql-nio-pool-9|574) [CloudCoordinator.prepare():67] get cluster by context null
2024-03-04 15:24:22,555 WARN (mysql-nio-pool-9|574) [CloudCoordinator.prepare():75] invalid clusterName: null
2024-03-04 15:24:22,555 WARN (mysql-nio-pool-9|574) [StmtExecutor.sendResult():1700] cancel fragment query_id:f4e0832dc2a94f0c-bba7afaee9bdb38b cause null
2024-03-04 15:24:22,555 WARN (mysql-nio-pool-9|574) [Coordinator.cancel():1473] Cancel execution of query f4e0832dc2a94f0c-bba7afaee9bdb38b, this is a outside invoke
2024-03-04 15:24:22,555 INFO (mysql-nio-pool-9|574) [QueryQueue.returnToken():154] wgId= 1, version=0,maxConcurrency=2147483647, maxQueueSize=0, queueTimeout=0, currentRunningQueryNum=5, currentWaitingQueryNum=0
2024-03-04 15:24:22,555 DEBUG (mysql-nio-pool-9|574) [QeProcessorImpl.unregisterQuery():145] Deregister query id f4e0832dc2a94f0c-bba7afaee9bdb38b
2024-03-04 15:24:22,555 WARN (mysql-nio-pool-9|574) [ConnectProcessor.handleQueryException():361] Process one query failed because unknown reason:
java.lang.NullPointerException: null
    at org.apache.doris.qe.Coordinator.computeScanRangeAssignment(Coordinator.java:2280) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.Coordinator.processFragmentAssignmentAndParams(Coordinator.java:578) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.cloud.qe.CloudCoordinator.processFragmentAssignmentAndParams(CloudCoordinator.java:84) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.Coordinator.execInternal(Coordinator.java:651) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.Coordinator.exec(Coordinator.java:630) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.StmtExecutor.sendResult(StmtExecutor.java:1589) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.StmtExecutor.handleQueryStmt(StmtExecutor.java:1562) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.StmtExecutor.handleQueryWithRetry(StmtExecutor.java:711) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.StmtExecutor.executeByNereids(StmtExecutor.java:663) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.StmtExecutor.execute(StmtExecutor.java:496) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.StmtExecutor.execute(StmtExecutor.java:475) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.ConnectProcessor.executeQuery(ConnectProcessor.java:279) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.ConnectProcessor.handleQuery(ConnectProcessor.java:197) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.MysqlConnectProcessor.handleQuery(MysqlConnectProcessor.java:176) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.MysqlConnectProcessor.dispatch(MysqlConnectProcessor.java:205) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.qe.MysqlConnectProcessor.processOnce(MysqlConnectProcessor.java:258) ~[doris-fe.jar:1.2-SNAPSHOT]
    at org.apache.doris.mysql.ReadListener.lambda$handleEvent$0(ReadListener.java:52) ~[doris-fe.jar:1.2-SNAPSHOT]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) ~[?:1.8.0_131]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) ~[?:1.8.0_131]
    at java.lang.Thread.run(Thread.java:748) ~[?:1.8.0_131]
```
* fix more regression cases to run in cloud mode
* fix review
* fix
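
All of the regression suites touched below gain the same cloud-mode guard: on a cloud deployment, the newly created user must also hold USAGE_PRIV on a compute cluster, otherwise its first SELECT takes the "cant get a valid cluster" path shown in the log above. A minimal sketch of that pattern, assuming the regression-framework helpers used in these suites (isCloudMode, sql, assertTrue, connect) and placeholder user/table names:
```groovy
// Sketch only: the user, password, and table names are placeholders; the
// helpers come from the Doris regression-test framework used by the suites below.
def user = 'example_user'

// cloud-mode: grant cluster usage so the user can actually run queries
if (isCloudMode()) {
    def clusters = sql " SHOW CLUSTERS; "
    assertTrue(!clusters.isEmpty())
    def validCluster = clusters[0][0]
    sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}"""
}

// with the grant in place, the connection picks a valid cluster and the
// SELECT no longer falls into the NullPointerException path
connect(user = user, password = 'Doris_123456', url = context.config.jdbcUrl) {
    sql "SELECT * FROM example_table"
}
```
The GRANT statement is exactly what each suite below adds inside its isCloudMode() branch.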
---
.../main/java/org/apache/doris/catalog/Tablet.java | 2 +-
.../apache/doris/catalog/TabletInvertedIndex.java | 6 +-
.../apache/doris/cloud/catalog/CloudTablet.java | 10 +++
.../apache/doris/cloud/qe/CloudCoordinator.java | 77 ++++++++++++++++++++--
.../java/org/apache/doris/qe/ConnectContext.java | 3 +-
.../main/java/org/apache/doris/qe/Coordinator.java | 64 +-----------------
.../account_p0/test_nereids_authentication.groovy | 9 ++-
.../external_table_p0/jdbc/test_jdbc_call.groovy | 9 +++
.../suites/javaudf_p0/test_javaudf_auth.groovy | 8 +++
.../authorization/view_authorization.groovy | 8 +++
.../insert_into_table/insert_auth.groovy | 8 +++
.../suites/query_p0/test_row_policy.groovy | 8 +++
12 files changed, 141 insertions(+), 71 deletions(-)
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java
index 6c305587806..376f7649c68 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java
@@ -84,7 +84,7 @@ public class Tablet extends MetaObject implements Writable {
}
@SerializedName(value = "id")
- private long id;
+ protected long id;
@SerializedName(value = "replicas")
protected List<Replica> replicas;
@SerializedName(value = "checkedVersion")
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java
index 9039d691bb3..5a72d36c038 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java
@@ -589,9 +589,11 @@ public class TabletInvertedIndex {
Preconditions.checkState(tabletMetaMap.containsKey(tabletId),
"tablet " + tabletId + " not exists, replica " +
replica.getId()
+ ", backend " + replica.getBackendId());
- replicaMetaTable.put(tabletId, replica.getBackendId(), replica);
+ // In cloud mode, create table does not need a backendId; represent it with -1.
+ long backendId = Config.isCloudMode() ? -1 : replica.getBackendId();
+ replicaMetaTable.put(tabletId, backendId, replica);
replicaToTabletMap.put(replica.getId(), tabletId);
- backingReplicaMetaTable.put(replica.getBackendId(), tabletId, replica);
+ backingReplicaMetaTable.put(backendId, tabletId, replica);
if (LOG.isDebugEnabled()) {
LOG.debug("add replica {} of tablet {} in backend {}",
replica.getId(), tabletId, replica.getBackendId());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudTablet.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudTablet.java
index 28384bfdcf3..5a6d30f8199 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudTablet.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/catalog/CloudTablet.java
@@ -17,6 +17,7 @@
package org.apache.doris.cloud.catalog;
+import org.apache.doris.catalog.Env;
import org.apache.doris.catalog.Replica;
import org.apache.doris.catalog.Tablet;
import org.apache.doris.common.InternalErrorCode;
@@ -86,4 +87,13 @@ public class CloudTablet extends Tablet {
return delete || !hasBackend;
}
+ public void addReplica(Replica replica, boolean isRestore) {
+ if (isLatestReplicaAndDeleteOld(replica)) {
+ replicas.add(replica);
+ if (!isRestore) {
+ Env.getCurrentInvertedIndex().addReplica(id, replica);
+ }
+ }
+ }
+
}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/cloud/qe/CloudCoordinator.java b/fe/fe-core/src/main/java/org/apache/doris/cloud/qe/CloudCoordinator.java
index c896533cc09..02f1f906b3b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/cloud/qe/CloudCoordinator.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/cloud/qe/CloudCoordinator.java
@@ -20,21 +20,32 @@ package org.apache.doris.cloud.qe;
import org.apache.doris.analysis.Analyzer;
import org.apache.doris.analysis.DescriptorTable;
import org.apache.doris.catalog.Env;
+import org.apache.doris.catalog.OlapTable;
import org.apache.doris.cloud.catalog.CloudEnv;
+import org.apache.doris.cloud.catalog.CloudPartition;
import org.apache.doris.cloud.system.CloudSystemInfoService;
+import org.apache.doris.common.UserException;
import org.apache.doris.nereids.stats.StatsErrorEstimator;
+import org.apache.doris.planner.OlapScanNode;
import org.apache.doris.planner.PlanFragment;
import org.apache.doris.planner.Planner;
import org.apache.doris.planner.ScanNode;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.qe.Coordinator;
+import org.apache.doris.rpc.RpcException;
import org.apache.doris.thrift.TUniqueId;
import com.google.common.base.Strings;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import java.util.ArrayList;
+import java.util.HashSet;
import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
public class CloudCoordinator extends Coordinator {
private static final Logger LOG = LogManager.getLogger(Coordinator.class);
@@ -49,7 +60,8 @@ public class CloudCoordinator extends Coordinator {
super(jobId, queryId, descTable, fragments, scanNodes, timezone,
loadZeroTolerance);
}
- protected void prepare() {
+ @Override
+ protected void prepare() throws Exception {
String cluster = null;
ConnectContext context = ConnectContext.get();
if (context != null) {
@@ -77,11 +89,9 @@ public class CloudCoordinator extends Coordinator {
}
this.idToBackend = ((CloudSystemInfoService)
Env.getCurrentSystemInfo()).getCloudIdToBackend(cluster);
+
super.prepare();
- }
- protected void processFragmentAssignmentAndParams() throws Exception {
- super.processFragmentAssignmentAndParams();
if (idToBackend == null || idToBackend.isEmpty()) {
LOG.warn("no available backends, idToBackend {}", idToBackend);
String clusterName = ConnectContext.get() != null
@@ -90,4 +100,63 @@ public class CloudCoordinator extends Coordinator {
+ clusterName);
}
}
+
+ @Override
+ protected void computeScanRangeAssignment() throws Exception {
+ setVisibleVersionForOlapScanNode();
+ super.computeScanRangeAssignment();
+ }
+
+ // In cloud mode, a meta read lock is not enough to keep a snapshot of the partition versions.
+ // After all scan nodes are collected, it is possible to take a snapshot of the partition versions.
+ private void setVisibleVersionForOlapScanNode() throws RpcException, UserException {
+ List<CloudPartition> partitions = new ArrayList<>();
+ Set<Long> partitionSet = new HashSet<>();
+ for (ScanNode node : scanNodes) {
+ if (!(node instanceof OlapScanNode)) {
+ continue;
+ }
+
+ OlapScanNode scanNode = (OlapScanNode) node;
+ OlapTable table = scanNode.getOlapTable();
+ for (Long id : scanNode.getSelectedPartitionIds()) {
+ if (!partitionSet.contains(id)) {
+ partitionSet.add(id);
+ partitions.add((CloudPartition) table.getPartition(id));
+ }
+ }
+ }
+
+ if (partitions.isEmpty()) {
+ return;
+ }
+
+ List<Long> versions = CloudPartition.getSnapshotVisibleVersion(partitions);
+ assert versions.size() == partitions.size() : "the got num versions is not equals to acquired num versions";
+ if (versions.stream().anyMatch(x -> x <= 0)) {
+ int size = versions.size();
+ for (int i = 0; i < size; ++i) {
+ if (versions.get(i) <= 0) {
+ LOG.warn("partition {} getVisibleVersion error, the
visibleVersion is {}",
+ partitions.get(i).getId(), versions.get(i));
+ throw new UserException("partition " +
partitions.get(i).getId()
+ + " getVisibleVersion error, the visibleVersion is " +
versions.get(i));
+ }
+ }
+ }
+
+ // ATTN: the table ids are ignored here because both ids are allocated from the same id generator.
+ Map<Long, Long> visibleVersionMap = IntStream.range(0, versions.size())
+ .boxed()
+ .collect(Collectors.toMap(i -> partitions.get(i).getId(), versions::get));
+
+ for (ScanNode node : scanNodes) {
+ if (!(node instanceof OlapScanNode)) {
+ continue;
+ }
+
+ OlapScanNode scanNode = (OlapScanNode) node;
+ scanNode.updateScanRangeVersions(visibleVersionMap);
+ }
+ }
}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java
index fa212b83d6a..5779d1ffba8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ConnectContext.java
@@ -37,6 +37,7 @@ import org.apache.doris.catalog.Type;
import org.apache.doris.cloud.system.CloudSystemInfoService;
import org.apache.doris.cluster.ClusterNamespace;
import org.apache.doris.common.Config;
+import org.apache.doris.common.ErrorCode;
import org.apache.doris.common.util.DebugUtil;
import org.apache.doris.common.util.TimeUtils;
import org.apache.doris.datasource.CatalogIf;
@@ -1093,10 +1094,8 @@ public class ConnectContext {
if (Strings.isNullOrEmpty(cluster)) {
LOG.warn("cant get a valid cluster for user {} to use",
getCurrentUserIdentity());
- /*
getState().setError(ErrorCode.ERR_NO_CLUSTER_ERROR,
"Cant get a Valid cluster for you to use, plz connect
admin");
- */
} else {
this.cloudCluster = cluster;
LOG.info("finally set context cluster name {}", cloudCluster);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java
index f4cd2f13589..42a1455460f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java
@@ -22,8 +22,6 @@ import org.apache.doris.analysis.DescriptorTable;
import org.apache.doris.analysis.StorageBackend;
import org.apache.doris.catalog.Env;
import org.apache.doris.catalog.FsBroker;
-import org.apache.doris.catalog.OlapTable;
-import org.apache.doris.cloud.catalog.CloudPartition;
import org.apache.doris.common.Config;
import org.apache.doris.common.Pair;
import org.apache.doris.common.Reference;
@@ -149,7 +147,6 @@ import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
-import java.util.stream.IntStream;
import java.util.stream.Stream;
public class Coordinator implements CoordInterface {
@@ -211,7 +208,7 @@ public class Coordinator implements CoordInterface {
private final List<BackendExecState> needCheckBackendExecStates = Lists.newArrayList();
private final List<PipelineExecContext> needCheckPipelineExecContexts = Lists.newArrayList();
private ResultReceiver receiver;
- private final List<ScanNode> scanNodes;
+ protected final List<ScanNode> scanNodes;
private int scanRangeNum = 0;
// number of instances of this query, equals to
// number of backends executing plan fragments on behalf of this query;
@@ -511,7 +508,7 @@ public class Coordinator implements CoordInterface {
}
// Initialize
- protected void prepare() {
+ protected void prepare() throws Exception {
for (PlanFragment fragment : fragments) {
fragmentExecParamsMap.put(fragment.getFragmentId(), new FragmentExecParams(fragment));
}
@@ -2195,11 +2192,7 @@ public class Coordinator implements CoordInterface {
// Populates scan_range_assignment_.
// <fragment, <server, nodeId>>
- private void computeScanRangeAssignment() throws Exception {
- if (Config.isCloudMode() && Config.enable_cloud_snapshot_version) {
- setVisibleVersionForOlapScanNode();
- }
-
+ protected void computeScanRangeAssignment() throws Exception {
Map<TNetworkAddress, Long> assignedBytesPerHost = Maps.newHashMap();
Map<TNetworkAddress, Long> replicaNumPerHost =
getReplicaNumPerHostForOlapTable();
Collections.shuffle(scanNodes);
@@ -2415,58 +2408,7 @@ public class Coordinator implements CoordInterface {
// TODO: more ranges?
}
- // In cloud mode, meta read lock is not enough to keep a snapshot of the partition versions.
- // After all scan node are collected, it is possible to gain a snapshot of the partition version.
- private void setVisibleVersionForOlapScanNode() throws RpcException, UserException {
- List<CloudPartition> partitions = new ArrayList<>();
- Set<Long> partitionSet = new HashSet<>();
- for (ScanNode node : scanNodes) {
- if (!(node instanceof OlapScanNode)) {
- continue;
- }
-
- OlapScanNode scanNode = (OlapScanNode) node;
- OlapTable table = scanNode.getOlapTable();
- for (Long id : scanNode.getSelectedPartitionIds()) {
- if (!partitionSet.contains(id)) {
- partitionSet.add(id);
- partitions.add((CloudPartition) table.getPartition(id));
- }
- }
- }
-
- if (partitions.isEmpty()) {
- return;
- }
- List<Long> versions = CloudPartition.getSnapshotVisibleVersion(partitions);
- assert versions.size() == partitions.size() : "the got num versions is not equals to acquired num versions";
- if (versions.stream().anyMatch(x -> x <= 0)) {
- int size = versions.size();
- for (int i = 0; i < size; ++i) {
- if (versions.get(i) <= 0) {
- LOG.warn("partition {} getVisibleVersion error, the
visibleVersion is {}",
- partitions.get(i).getId(), versions.get(i));
- throw new UserException("partition " +
partitions.get(i).getId()
- + " getVisibleVersion error, the visibleVersion is
" + versions.get(i));
- }
- }
- }
-
- // ATTN: the table ids are ignored here because the both id are allocated from a same id generator.
- Map<Long, Long> visibleVersionMap = IntStream.range(0, versions.size())
- .boxed()
- .collect(Collectors.toMap(i -> partitions.get(i).getId(), versions::get));
-
- for (ScanNode node : scanNodes) {
- if (!(node instanceof OlapScanNode)) {
- continue;
- }
-
- OlapScanNode scanNode = (OlapScanNode) node;
- scanNode.updateScanRangeVersions(visibleVersionMap);
- }
- }
// update job progress from BE
public void updateFragmentExecStatus(TReportExecStatusParams params) {
diff --git a/regression-test/suites/account_p0/test_nereids_authentication.groovy b/regression-test/suites/account_p0/test_nereids_authentication.groovy
index 46d60732d6b..8bc2d18cb3e 100644
--- a/regression-test/suites/account_p0/test_nereids_authentication.groovy
+++ b/regression-test/suites/account_p0/test_nereids_authentication.groovy
@@ -44,7 +44,14 @@ suite("test_nereids_authentication", "query") {
try_sql "DROP USER ${user}"
sql "CREATE USER ${user} IDENTIFIED BY 'Doris_123456'"
sql "GRANT SELECT_PRIV ON internal.${dbName}.${tableName1} TO ${user}"
-
+ //cloud-mode
+ if (isCloudMode()) {
+ def clusters = sql " SHOW CLUSTERS; "
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}""";
+ }
+
def tokens = context.config.jdbcUrl.split('/')
def url=tokens[0] + "//" + tokens[2] + "/" + dbName + "?"
def result = connect(user=user, password='Doris_123456', url=url) {
diff --git a/regression-test/suites/external_table_p0/jdbc/test_jdbc_call.groovy b/regression-test/suites/external_table_p0/jdbc/test_jdbc_call.groovy
index 43bcdadfba2..bb9e39562cf 100644
--- a/regression-test/suites/external_table_p0/jdbc/test_jdbc_call.groovy
+++ b/regression-test/suites/external_table_p0/jdbc/test_jdbc_call.groovy
@@ -113,6 +113,15 @@ suite("test_jdbc_call", "p0,external,doris,external_docker,external_docker_doris
sql """create user ${user2}""";
sql """grant load_priv, select_priv on *.*.* to ${user2}"""
+ //cloud-mode
+ if (isCloudMode()) {
+ def clusters = sql " SHOW CLUSTERS; "
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user1}""";
+ sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user2}""";
+ }
+
def result1 = connect(user="${user1}", password="", url=context.config.jdbcUrl) {
sql """set enable_nereids_planner=true;"""
sql """set enable_fallback_to_original_planner=false;"""
diff --git a/regression-test/suites/javaudf_p0/test_javaudf_auth.groovy b/regression-test/suites/javaudf_p0/test_javaudf_auth.groovy
index 5d08d8d0bd0..39afc135f34 100644
--- a/regression-test/suites/javaudf_p0/test_javaudf_auth.groovy
+++ b/regression-test/suites/javaudf_p0/test_javaudf_auth.groovy
@@ -42,6 +42,14 @@ suite("test_javaudf_auth") {
sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'"""
sql """CREATE DATABASE ${dbName}"""
+
+ //cloud-mode
+ if (isCloudMode()) {
+ def clusters = sql " SHOW CLUSTERS; "
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}""";
+ }
sql """USE ${dbName}"""
sql """ CREATE FUNCTION java_udf_auth_test(int) RETURNS int PROPERTIES (
diff --git a/regression-test/suites/nereids_p0/authorization/view_authorization.groovy b/regression-test/suites/nereids_p0/authorization/view_authorization.groovy
index aa76ccba3df..cd11f8ed0e1 100644
--- a/regression-test/suites/nereids_p0/authorization/view_authorization.groovy
+++ b/regression-test/suites/nereids_p0/authorization/view_authorization.groovy
@@ -47,6 +47,14 @@ suite("view_authorization") {
sql "grant SELECT_PRIV on ${db}.${view1} to '${user1}'@'%';"
sql "grant SELECT_PRIV on ${db}.${view3} to '${user1}'@'%';"
+ //cloud-mode
+ if (isCloudMode()) {
+ def clusters = sql " SHOW CLUSTERS; "
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user1}""";
+ }
+
sql 'sync'
def defaultDbUrl = context.config.jdbcUrl.substring(0, context.config.jdbcUrl.lastIndexOf("/"))
diff --git a/regression-test/suites/nereids_p0/insert_into_table/insert_auth.groovy b/regression-test/suites/nereids_p0/insert_into_table/insert_auth.groovy
index 1a333d41d32..490ff5cbee4 100644
--- a/regression-test/suites/nereids_p0/insert_into_table/insert_auth.groovy
+++ b/regression-test/suites/nereids_p0/insert_into_table/insert_auth.groovy
@@ -48,6 +48,14 @@ suite('nereids_insert_auth') {
try_sql("DROP USER ${user}")
sql """CREATE USER '${user}' IDENTIFIED BY '${pwd}'"""
+ //cloud-mode
+ if (isCloudMode()) {
+ def clusters = sql " SHOW CLUSTERS; "
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}""";
+ }
+
connect(user=user, password="${pwd}", url=url) {
try {
sql """ insert into ${db}.${t1} values (1, 1) """
diff --git a/regression-test/suites/query_p0/test_row_policy.groovy b/regression-test/suites/query_p0/test_row_policy.groovy
index 5866d89d96e..620f49dac4e 100644
--- a/regression-test/suites/query_p0/test_row_policy.groovy
+++ b/regression-test/suites/query_p0/test_row_policy.groovy
@@ -29,6 +29,14 @@ suite("test_row_policy") {
sql """DROP ROW POLICY IF EXISTS policy_01 ON ${tableName} FOR ${user}"""
sql """CREATE ROW POLICY IF NOT EXISTS policy_01 ON ${tableName} AS
restrictive TO ${user} USING(id=1)"""
+ //cloud-mode
+ if (isCloudMode()) {
+ def clusters = sql " SHOW CLUSTERS; "
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """GRANT USAGE_PRIV ON CLUSTER ${validCluster} TO ${user}""";
+ }
+
connect(user=user, password='123456', url=url) {
sql "set enable_nereids_planner = false"
sql "SELECT * FROM ${tableName} a JOIN ${tableName} b ON a.id = b.id"
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]