This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch branch-4.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-4.0 by this push:
new eb8ede1dfd8 branch-4.0: [fix](block-rule) Skip SQL block rules check for EXPLAIN statements #59445 (#59509)
eb8ede1dfd8 is described below
commit eb8ede1dfd865033836304f63d4ce61efb8b7ece
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Mon Jan 5 10:19:52 2026 +0800
branch-4.0: [fix](block-rule) Skip SQL block rules check for EXPLAIN statements #59445 (#59509)
Cherry-picked from #59445
Co-authored-by: Chenjunwei <[email protected]>
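
For context, the user-visible behavior can be sketched in plain SQL. This is an illustrative sketch only: the rule and table names (test_rule_sql, table_2) mirror the regression test touched by this patch, the table is assumed to already exist, and the regex escaping or the global/per-user binding may need adjusting for a given deployment.

    -- Illustrative sketch; assumes table_2 exists and the rule applies to the current user.
    CREATE SQL_BLOCK_RULE test_rule_sql
    PROPERTIES ("sql" = "SELECT \\* FROM table_2", "global" = "true", "enable" = "true");

    SELECT * FROM table_2;           -- blocked: "sql match regex sql block rule: test_rule_sql"
    EXPLAIN SELECT * FROM table_2;   -- with this fix, no longer blocked; only the plan is shown

    DROP SQL_BLOCK_RULE test_rule_sql;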
---
.../trees/plans/commands/ExplainCommand.java | 3 +-
.../hive/test_external_sql_block_rule.groovy | 130 ++++++++++-------
.../iceberg/test_iceberg_sql_block_rule.groovy | 156 +++++++++++++++++++++
.../paimon/test_paimon_sql_block_rule.groovy | 141 +++++++++++++++++++
.../sql_block_rule_p0/test_sql_block_rule.groovy | 6 +-
5 files changed, 381 insertions(+), 55 deletions(-)
diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ExplainCommand.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ExplainCommand.java
index 834007ad81d..badd660734c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ExplainCommand.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/trees/plans/commands/ExplainCommand.java
@@ -104,7 +104,8 @@ public class ExplainCommand extends Command implements NoForward {
}
planner.plan(logicalPlanAdapter, ctx.getSessionVariable().toThrift());
executor.setPlanner(planner);
- executor.checkBlockRules();
+ // Skip SQL block rules check for EXPLAIN statements since they only show
+ // the execution plan without actually executing the query
if (showPlanProcess) {
executor.handleExplainPlanProcessStmt(planner.getCascadesContext().getPlanProcesses());
} else {
diff --git a/regression-test/suites/external_table_p0/hive/test_external_sql_block_rule.groovy b/regression-test/suites/external_table_p0/hive/test_external_sql_block_rule.groovy
index 0f118483224..4629d9583e3 100644
--- a/regression-test/suites/external_table_p0/hive/test_external_sql_block_rule.groovy
+++ b/regression-test/suites/external_table_p0/hive/test_external_sql_block_rule.groovy
@@ -24,81 +24,111 @@ suite("test_external_sql_block_rule", "external_docker,hive,external_docker_hive
String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
String hms_port = context.config.otherConfigs.get("hive2HmsPort")
+ String catalog_name = "test_hive2_external_sql_block_rule"
- sql """drop catalog if exists test_hive2_external_sql_block_rule """
+ sql """drop catalog if exists ${catalog_name}"""
- sql """CREATE CATALOG test_hive2_external_sql_block_rule PROPERTIES (
+ sql """CREATE CATALOG ${catalog_name} PROPERTIES (
'type'='hms',
'hive.metastore.uris' = 'thrift://${externalEnvIp}:${hms_port}',
'hadoop.username' = 'hive'
);"""
- sql "use test_hive2_external_sql_block_rule.`default`";
+ sql "use ${catalog_name}.`default`";
qt_sql01 """select * from parquet_partition_table order by l_linenumber,l_orderkey limit 10;"""
- sql """drop sql_block_rule if exists external_hive_partition"""
- sql """create sql_block_rule external_hive_partition properties("partition_num" = "3", "global" = "false");"""
- sql """drop sql_block_rule if exists external_hive_partition2"""
- sql """create sql_block_rule external_hive_partition2 properties("tablet_num" = "3", "global" = "false");"""
- sql """drop sql_block_rule if exists external_hive_partition3"""
- sql """create sql_block_rule external_hive_partition3 properties("cardinality" = "3", "global" = "false");"""
- // create 3 users
- sql """drop user if exists external_block_user1"""
- sql """create user external_block_user1;"""
- sql """SET PROPERTY FOR 'external_block_user1' 'sql_block_rules' = 'external_hive_partition';"""
- sql """grant all on *.*.* to external_block_user1;"""
- //cloud-mode
- if (isCloudMode()) {
- def clusters = sql " SHOW CLUSTERS; "
- assertTrue(!clusters.isEmpty())
- def validCluster = clusters[0][0]
- sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO external_block_user1;""";
- }
+ // Clean up existing rules and users
+ sql """drop sql_block_rule if exists hive_partition_rule"""
+ sql """drop sql_block_rule if exists hive_split_rule"""
+ sql """drop sql_block_rule if exists hive_cardinality_rule"""
+ sql """drop sql_block_rule if exists hive_regex_rule"""
+ sql """drop user if exists hive_block_user1"""
+ sql """drop user if exists hive_block_user2"""
+ sql """drop user if exists hive_block_user3"""
+ sql """drop user if exists hive_block_user4"""
+
+ // Create non-global rules (won't affect other parallel tests)
+ sql """create sql_block_rule hive_partition_rule properties("partition_num" = "3", "global" = "false");"""
+ sql """create sql_block_rule hive_split_rule properties("tablet_num" = "3", "global" = "false");"""
+ sql """create sql_block_rule hive_cardinality_rule properties("cardinality" = "3", "global" = "false");"""
+ sql """create sql_block_rule hive_regex_rule properties("sql" = "SELECT \\\\*", "global" = "false");"""
+
+ // Create test users and bind rules
+ sql """create user hive_block_user1;"""
+ sql """SET PROPERTY FOR 'hive_block_user1' 'sql_block_rules' = 'hive_partition_rule';"""
+ sql """grant all on *.*.* to hive_block_user1;"""
+
+ sql """create user hive_block_user2;"""
+ sql """SET PROPERTY FOR 'hive_block_user2' 'sql_block_rules' = 'hive_split_rule';"""
+ sql """grant all on *.*.* to hive_block_user2;"""
+
+ sql """create user hive_block_user3;"""
+ sql """SET PROPERTY FOR 'hive_block_user3' 'sql_block_rules' = 'hive_cardinality_rule';"""
+ sql """grant all on *.*.* to hive_block_user3;"""
+
+ sql """create user hive_block_user4;"""
+ sql """SET PROPERTY FOR 'hive_block_user4' 'sql_block_rules' = 'hive_regex_rule';"""
+ sql """grant all on *.*.* to hive_block_user4;"""
- sql """drop user if exists external_block_user2"""
- sql """create user external_block_user2;"""
- sql """SET PROPERTY FOR 'external_block_user2' 'sql_block_rules' = 'external_hive_partition2';"""
- sql """grant all on *.*.* to external_block_user2;"""
- //cloud-mode
+ // cloud-mode: grant cluster privileges
if (isCloudMode()) {
def clusters = sql " SHOW CLUSTERS; "
assertTrue(!clusters.isEmpty())
def validCluster = clusters[0][0]
- sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO external_block_user2;""";
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO hive_block_user1;"""
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO hive_block_user2;"""
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO hive_block_user3;"""
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO hive_block_user4;"""
}
- sql """drop user if exists external_block_user3"""
- sql """create user external_block_user3;"""
- sql """SET PROPERTY FOR 'external_block_user3' 'sql_block_rules' = 'external_hive_partition3';"""
- sql """grant all on *.*.* to external_block_user3;"""
- //cloud-mode
- if (isCloudMode()) {
- def clusters = sql " SHOW CLUSTERS; "
- assertTrue(!clusters.isEmpty())
- def validCluster = clusters[0][0]
- sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO external_block_user3;""";
+ // Test 1: partition_num rule
+ connect('hive_block_user1', '', context.config.jdbcUrl) {
+ test {
+ sql """select * from ${catalog_name}.`default`.parquet_partition_table order by l_linenumber limit 10;"""
+ exception """sql hits sql block rule: hive_partition_rule, reach partition_num : 3"""
+ }
+ // Test EXPLAIN should not be blocked
+ sql """explain select * from ${catalog_name}.`default`.parquet_partition_table order by l_linenumber limit 10;"""
}
- // login as external_block_user1
- def result1 = connect('external_block_user1', '', context.config.jdbcUrl) {
+ // Test 2: tablet_num (split) rule
+ connect('hive_block_user2', '', context.config.jdbcUrl) {
test {
- sql """select * from test_hive2_external_sql_block_rule.`default`.parquet_partition_table order by l_linenumber limit 10;"""
- exception """sql hits sql block rule: external_hive_partition, reach partition_num : 3"""
+ sql """select * from ${catalog_name}.`default`.parquet_partition_table order by l_linenumber limit 10;"""
+ exception """sql hits sql block rule: hive_split_rule, reach tablet_num : 3"""
}
+ // Test EXPLAIN should not be blocked
+ sql """explain select * from ${catalog_name}.`default`.parquet_partition_table order by l_linenumber limit 10;"""
}
- // login as external_block_user2
- def result2 = connect('external_block_user2', '', context.config.jdbcUrl) {
+
+ // Test 3: cardinality rule
+ connect('hive_block_user3', '', context.config.jdbcUrl) {
test {
- sql """select * from test_hive2_external_sql_block_rule.`default`.parquet_partition_table order by l_linenumber limit 10;"""
- exception """sql hits sql block rule: external_hive_partition2, reach tablet_num : 3"""
+ sql """select * from ${catalog_name}.`default`.parquet_partition_table order by l_linenumber limit 10;"""
+ exception """sql hits sql block rule: hive_cardinality_rule, reach cardinality : 3"""
}
+ // Test EXPLAIN should not be blocked
+ sql """explain select * from ${catalog_name}.`default`.parquet_partition_table order by l_linenumber limit 10;"""
}
- // login as external_block_user3
- def result3 = connect('external_block_user3', '', context.config.jdbcUrl) {
+
+ // Test 4: regex rule
+ connect('hive_block_user4', '', context.config.jdbcUrl) {
test {
- sql """select * from test_hive2_external_sql_block_rule.`default`.parquet_partition_table order by l_linenumber limit 10;"""
- exception """sql hits sql block rule: external_hive_partition3, reach cardinality : 3"""
+ sql """SELECT * FROM ${catalog_name}.`default`.parquet_partition_table limit 10;"""
+ exception """sql match regex sql block rule: hive_regex_rule"""
}
+ // Test EXPLAIN should not be blocked by regex rule
+ sql """EXPLAIN SELECT * FROM ${catalog_name}.`default`.parquet_partition_table limit 10;"""
}
-}
+ // Cleanup
+ sql """drop user if exists hive_block_user1"""
+ sql """drop user if exists hive_block_user2"""
+ sql """drop user if exists hive_block_user3"""
+ sql """drop user if exists hive_block_user4"""
+ sql """drop sql_block_rule if exists hive_partition_rule"""
+ sql """drop sql_block_rule if exists hive_split_rule"""
+ sql """drop sql_block_rule if exists hive_cardinality_rule"""
+ sql """drop sql_block_rule if exists hive_regex_rule"""
+ sql """drop catalog if exists ${catalog_name}"""
+}
diff --git a/regression-test/suites/external_table_p0/iceberg/test_iceberg_sql_block_rule.groovy b/regression-test/suites/external_table_p0/iceberg/test_iceberg_sql_block_rule.groovy
new file mode 100644
index 00000000000..12b0cd15365
--- /dev/null
+++ b/regression-test/suites/external_table_p0/iceberg/test_iceberg_sql_block_rule.groovy
@@ -0,0 +1,156 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_iceberg_sql_block_rule", "p0,external,doris,external_docker,external_docker_doris") {
+ String enabled = context.config.otherConfigs.get("enableIcebergTest")
+ if (enabled == null || !enabled.equalsIgnoreCase("true")) {
+ logger.info("disable iceberg test.")
+ return
+ }
+
+ String rest_port = context.config.otherConfigs.get("iceberg_rest_uri_port")
+ String minio_port = context.config.otherConfigs.get("iceberg_minio_port")
+ String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+ String catalog_name = "test_iceberg_sql_block_rule_ctl"
+ String db_name = "iceberg_sql_block_rule_db"
+ String table_name = "iceberg_sql_block_rule_tbl"
+
+ sql """drop catalog if exists ${catalog_name}"""
+ sql """
+ CREATE CATALOG ${catalog_name} PROPERTIES (
+ 'type'='iceberg',
+ 'iceberg.catalog.type'='rest',
+ 'uri' = 'http://${externalEnvIp}:${rest_port}',
+ "s3.access_key" = "admin",
+ "s3.secret_key" = "password",
+ "s3.endpoint" = "http://${externalEnvIp}:${minio_port}",
+ "s3.region" = "us-east-1"
+ );"""
+
+ sql """switch ${catalog_name}"""
+ sql """create database if not exists ${db_name}"""
+ sql """use ${db_name}"""
+ sql """drop table if exists ${table_name}"""
+ sql """
+ create table ${table_name} (
+ id int,
+ partition_key int,
+ val string
+ ) PARTITION BY LIST (partition_key) ()
+ """
+ sql """insert into ${table_name} values (1, 1, 'a')"""
+ sql """insert into ${table_name} values (2, 2, 'b')"""
+ sql """insert into ${table_name} values (3, 3, 'c')"""
+ sql """insert into ${table_name} values (4, 4, 'd')"""
+
+ // Clean up existing rules and users
+ sql """drop sql_block_rule if exists iceberg_partition_rule"""
+ sql """drop sql_block_rule if exists iceberg_split_rule"""
+ sql """drop sql_block_rule if exists iceberg_cardinality_rule"""
+ sql """drop sql_block_rule if exists iceberg_regex_rule"""
+ sql """drop user if exists iceberg_block_user1"""
+ sql """drop user if exists iceberg_block_user2"""
+ sql """drop user if exists iceberg_block_user3"""
+ sql """drop user if exists iceberg_block_user4"""
+
+ // Create non-global rules (won't affect other parallel tests)
+ sql """create sql_block_rule iceberg_partition_rule properties("partition_num" = "1", "global" = "false");"""
+ sql """create sql_block_rule iceberg_split_rule properties("tablet_num" = "1", "global" = "false");"""
+ sql """create sql_block_rule iceberg_cardinality_rule properties("cardinality" = "1", "global" = "false");"""
+ sql """create sql_block_rule iceberg_regex_rule properties("sql" = "SELECT \\\\*", "global" = "false");"""
+
+ // Create test users and bind rules
+ sql """create user iceberg_block_user1;"""
+ sql """SET PROPERTY FOR 'iceberg_block_user1' 'sql_block_rules' = 'iceberg_partition_rule';"""
+ sql """grant all on *.*.* to iceberg_block_user1;"""
+
+ sql """create user iceberg_block_user2;"""
+ sql """SET PROPERTY FOR 'iceberg_block_user2' 'sql_block_rules' = 'iceberg_split_rule';"""
+ sql """grant all on *.*.* to iceberg_block_user2;"""
+
+ sql """create user iceberg_block_user3;"""
+ sql """SET PROPERTY FOR 'iceberg_block_user3' 'sql_block_rules' = 'iceberg_cardinality_rule';"""
+ sql """grant all on *.*.* to iceberg_block_user3;"""
+
+ sql """create user iceberg_block_user4;"""
+ sql """SET PROPERTY FOR 'iceberg_block_user4' 'sql_block_rules' = 'iceberg_regex_rule';"""
+ sql """grant all on *.*.* to iceberg_block_user4;"""
+
+ // cloud-mode: grant cluster privileges
+ if (isCloudMode()) {
+ def clusters = sql " SHOW CLUSTERS; "
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO iceberg_block_user1;"""
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO iceberg_block_user2;"""
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO iceberg_block_user3;"""
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO iceberg_block_user4;"""
+ }
+
+ // Test 1: partition_num rule
+ connect('iceberg_block_user1', '', context.config.jdbcUrl) {
+ test {
+ sql """select * from ${catalog_name}.${db_name}.${table_name}"""
+ exception """sql hits sql block rule: iceberg_partition_rule, reach partition_num : 1"""
+ }
+ // Test EXPLAIN should not be blocked
+ sql """explain select * from ${catalog_name}.${db_name}.${table_name}"""
+ }
+
+ // Test 2: tablet_num (split) rule
+ connect('iceberg_block_user2', '', context.config.jdbcUrl) {
+ test {
+ sql """select * from ${catalog_name}.${db_name}.${table_name}"""
+ exception """sql hits sql block rule: iceberg_split_rule, reach tablet_num : 1"""
+ }
+ // Test EXPLAIN should not be blocked
+ sql """explain select * from ${catalog_name}.${db_name}.${table_name}"""
+ }
+
+ // Test 3: cardinality rule
+ connect('iceberg_block_user3', '', context.config.jdbcUrl) {
+ test {
+ sql """select * from ${catalog_name}.${db_name}.${table_name}"""
+ exception """sql hits sql block rule: iceberg_cardinality_rule, reach cardinality : 1"""
+ }
+ // Test EXPLAIN should not be blocked
+ sql """explain select * from ${catalog_name}.${db_name}.${table_name}"""
+ }
+
+ // Test 4: regex rule
+ connect('iceberg_block_user4', '', context.config.jdbcUrl) {
+ test {
+ sql """SELECT * FROM ${catalog_name}.${db_name}.${table_name}"""
+ exception """sql match regex sql block rule: iceberg_regex_rule"""
+ }
+ // Test EXPLAIN should not be blocked by regex rule
+ sql """EXPLAIN SELECT * FROM ${catalog_name}.${db_name}.${table_name}"""
+ }
+
+ // Cleanup
+ sql """drop user if exists iceberg_block_user1"""
+ sql """drop user if exists iceberg_block_user2"""
+ sql """drop user if exists iceberg_block_user3"""
+ sql """drop user if exists iceberg_block_user4"""
+ sql """drop sql_block_rule if exists iceberg_partition_rule"""
+ sql """drop sql_block_rule if exists iceberg_split_rule"""
+ sql """drop sql_block_rule if exists iceberg_cardinality_rule"""
+ sql """drop sql_block_rule if exists iceberg_regex_rule"""
+ sql """drop table if exists ${table_name}"""
+ sql """drop database if exists ${db_name}"""
+ sql """drop catalog if exists ${catalog_name}"""
+}
diff --git a/regression-test/suites/external_table_p0/paimon/test_paimon_sql_block_rule.groovy b/regression-test/suites/external_table_p0/paimon/test_paimon_sql_block_rule.groovy
new file mode 100644
index 00000000000..b26f7a6cca0
--- /dev/null
+++ b/regression-test/suites/external_table_p0/paimon/test_paimon_sql_block_rule.groovy
@@ -0,0 +1,141 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_paimon_sql_block_rule", "p0,external,doris,external_docker,external_docker_doris") {
+ String enabled = context.config.otherConfigs.get("enablePaimonTest")
+ if (enabled == null || !enabled.equalsIgnoreCase("true")) {
+ logger.info("disable paimon test.")
+ return
+ }
+
+ String catalog_name = "test_paimon_sql_block_rule_ctl"
+ String db_name = "test_paimon_partition"
+ String table_name = "sales_by_date"
+ String minio_port = context.config.otherConfigs.get("iceberg_minio_port")
+ String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+
+ sql """drop catalog if exists ${catalog_name}"""
+ sql """
+ CREATE CATALOG ${catalog_name} PROPERTIES (
+ "type" = "paimon",
+ "warehouse" = "s3://warehouse/wh",
+ "s3.endpoint" = "http://${externalEnvIp}:${minio_port}",
+ "s3.access_key" = "admin",
+ "s3.secret_key" = "password",
+ "s3.path.style.access" = "true"
+ );
+ """
+
+ sql """switch ${catalog_name}"""
+ sql """use ${db_name}"""
+ // Use an existing Paimon table; DDL is not supported for this catalog.
+
+ // Clean up existing rules and users
+ sql """drop sql_block_rule if exists paimon_partition_rule"""
+ sql """drop sql_block_rule if exists paimon_split_rule"""
+ sql """drop sql_block_rule if exists paimon_cardinality_rule"""
+ sql """drop sql_block_rule if exists paimon_regex_rule"""
+ sql """drop user if exists paimon_block_user1"""
+ sql """drop user if exists paimon_block_user2"""
+ sql """drop user if exists paimon_block_user3"""
+ sql """drop user if exists paimon_block_user4"""
+
+ // Create non-global rules (won't affect other parallel tests)
+ sql """create sql_block_rule paimon_partition_rule properties("partition_num" = "1", "global" = "false");"""
+ sql """create sql_block_rule paimon_split_rule properties("tablet_num" = "1", "global" = "false");"""
+ sql """create sql_block_rule paimon_cardinality_rule properties("cardinality" = "1", "global" = "false");"""
+ sql """create sql_block_rule paimon_regex_rule properties("sql" = "SELECT \\\\*", "global" = "false");"""
+
+ // Create test users and bind rules
+ sql """create user paimon_block_user1;"""
+ sql """SET PROPERTY FOR 'paimon_block_user1' 'sql_block_rules' = 'paimon_partition_rule';"""
+ sql """grant all on *.*.* to paimon_block_user1;"""
+
+ sql """create user paimon_block_user2;"""
+ sql """SET PROPERTY FOR 'paimon_block_user2' 'sql_block_rules' = 'paimon_split_rule';"""
+ sql """grant all on *.*.* to paimon_block_user2;"""
+
+ sql """create user paimon_block_user3;"""
+ sql """SET PROPERTY FOR 'paimon_block_user3' 'sql_block_rules' = 'paimon_cardinality_rule';"""
+ sql """grant all on *.*.* to paimon_block_user3;"""
+
+ sql """create user paimon_block_user4;"""
+ sql """SET PROPERTY FOR 'paimon_block_user4' 'sql_block_rules' = 'paimon_regex_rule';"""
+ sql """grant all on *.*.* to paimon_block_user4;"""
+
+ // cloud-mode: grant cluster privileges
+ if (isCloudMode()) {
+ def clusters = sql " SHOW CLUSTERS; "
+ assertTrue(!clusters.isEmpty())
+ def validCluster = clusters[0][0]
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO paimon_block_user1;"""
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO paimon_block_user2;"""
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO paimon_block_user3;"""
+ sql """GRANT USAGE_PRIV ON CLUSTER `${validCluster}` TO paimon_block_user4;"""
+ }
+
+ // Test 1: partition_num rule
+ connect('paimon_block_user1', '', context.config.jdbcUrl) {
+ test {
+ sql """select * from ${catalog_name}.${db_name}.${table_name}"""
+ exception """sql hits sql block rule: paimon_partition_rule, reach partition_num : 1"""
+ }
+ // Test EXPLAIN should not be blocked
+ sql """explain select * from ${catalog_name}.${db_name}.${table_name}"""
+ }
+
+ // Test 2: tablet_num (split) rule
+ connect('paimon_block_user2', '', context.config.jdbcUrl) {
+ test {
+ sql """select * from ${catalog_name}.${db_name}.${table_name}"""
+ exception """sql hits sql block rule: paimon_split_rule, reach tablet_num : 1"""
+ }
+ // Test EXPLAIN should not be blocked
+ sql """explain select * from ${catalog_name}.${db_name}.${table_name}"""
+ }
+
+ // Test 3: cardinality rule
+ connect('paimon_block_user3', '', context.config.jdbcUrl) {
+ test {
+ sql """select * from ${catalog_name}.${db_name}.${table_name}"""
+ exception """sql hits sql block rule: paimon_cardinality_rule, reach cardinality : 1"""
+ }
+ // Test EXPLAIN should not be blocked
+ sql """explain select * from ${catalog_name}.${db_name}.${table_name}"""
+ }
+
+ // Test 4: regex rule
+ connect('paimon_block_user4', '', context.config.jdbcUrl) {
+ test {
+ sql """SELECT * FROM ${catalog_name}.${db_name}.${table_name}"""
+ exception """sql match regex sql block rule: paimon_regex_rule"""
+ }
+ // Test EXPLAIN should not be blocked by regex rule
+ sql """EXPLAIN SELECT * FROM ${catalog_name}.${db_name}.${table_name}"""
+ }
+
+ // Cleanup
+ sql """drop user if exists paimon_block_user1"""
+ sql """drop user if exists paimon_block_user2"""
+ sql """drop user if exists paimon_block_user3"""
+ sql """drop user if exists paimon_block_user4"""
+ sql """drop sql_block_rule if exists paimon_partition_rule"""
+ sql """drop sql_block_rule if exists paimon_split_rule"""
+ sql """drop sql_block_rule if exists paimon_cardinality_rule"""
+ sql """drop sql_block_rule if exists paimon_regex_rule"""
+ sql """drop catalog if exists ${catalog_name}"""
+}
diff --git a/regression-test/suites/sql_block_rule_p0/test_sql_block_rule.groovy b/regression-test/suites/sql_block_rule_p0/test_sql_block_rule.groovy
index c55ff805d6a..e24cfe2d71a 100644
--- a/regression-test/suites/sql_block_rule_p0/test_sql_block_rule.groovy
+++ b/regression-test/suites/sql_block_rule_p0/test_sql_block_rule.groovy
@@ -53,10 +53,8 @@ suite("test_sql_block_rule", "nonConcurrent") {
sql("SELECT * FROM table_2", false)
exception "sql match regex sql block rule: test_rule_sql"
}
- test {
- sql("EXPLAIN SELECT * FROM table_2", false)
- exception "sql match regex sql block rule: test_rule_sql"
- }
+ // EXPLAIN should not be blocked by sql block rule
+ sql "EXPLAIN SELECT * FROM table_2"
test {
sql("INSERT INTO table_2 SELECT * FROM table_2", false)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]