This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 8364165e30 [regression_test](testcase) add regression test case for
session variables skip_storage_engine_merge, skip_delete_predicate and
show_hidden_columns (#12617)
8364165e30 is described below
commit 8364165e3051012c505a34de416d0739564573f0
Author: TengJianPing <[email protected]>
AuthorDate: Fri Sep 16 10:33:12 2022 +0800
[regression_test](testcase) add regression test case for session variables
skip_storage_engine_merge, skip_delete_predicate and show_hidden_columns
(#12617)
Also add this functionality to the new olap scan node.
---
be/src/vec/exec/scan/new_olap_scanner.cpp | 35 +++++----
be/src/vec/exec/volap_scanner.cpp | 19 +++--
.../unique/test_unique_table_debug_data.out | 34 +++++++++
.../unique/test_unique_table_debug_data_delete.csv | 1 +
.../unique/test_unique_table_debug_data.groovy | 88 ++++++++++++++++++++++
5 files changed, 156 insertions(+), 21 deletions(-)
diff --git a/be/src/vec/exec/scan/new_olap_scanner.cpp
b/be/src/vec/exec/scan/new_olap_scanner.cpp
index 7ab46adc92..b85652abf0 100644
--- a/be/src/vec/exec/scan/new_olap_scanner.cpp
+++ b/be/src/vec/exec/scan/new_olap_scanner.cpp
@@ -148,7 +148,12 @@ Status NewOlapScanner::_init_tablet_reader_params(
->rowset_meta()
->is_segments_overlapping());
- _tablet_reader_params.direct_mode = _aggregation || single_version;
+ if (_state->skip_storage_engine_merge()) {
+ _tablet_reader_params.direct_mode = true;
+ _aggregation = true;
+ } else {
+ _tablet_reader_params.direct_mode = _aggregation || single_version;
+ }
RETURN_IF_ERROR(_init_return_columns(!_tablet_reader_params.direct_mode));
@@ -169,10 +174,12 @@ Status NewOlapScanner::_init_tablet_reader_params(
std::copy(function_filters.cbegin(), function_filters.cend(),
std::inserter(_tablet_reader_params.function_filters,
_tablet_reader_params.function_filters.begin()));
- auto& delete_preds = _tablet->delete_predicates();
- std::copy(delete_preds.cbegin(), delete_preds.cend(),
- std::inserter(_tablet_reader_params.delete_predicates,
- _tablet_reader_params.delete_predicates.begin()));
+ if (!_state->skip_delete_predicate()) {
+ auto& delete_preds = _tablet->delete_predicates();
+ std::copy(delete_preds.cbegin(), delete_preds.cend(),
+ std::inserter(_tablet_reader_params.delete_predicates,
+
_tablet_reader_params.delete_predicates.begin()));
+ }
// Merge the columns in delete predicate that not in latest schema in to
current tablet schema
for (auto& del_pred_pb : _tablet_reader_params.delete_predicates) {
@@ -228,15 +235,17 @@ Status NewOlapScanner::_init_tablet_reader_params(
_tablet_reader_params.delete_bitmap =
&_tablet->tablet_meta()->delete_bitmap();
}
- TOlapScanNode& olap_scan_node =
((NewOlapScanNode*)_parent)->_olap_scan_node;
- if (olap_scan_node.__isset.sort_info &&
olap_scan_node.sort_info.is_asc_order.size() > 0) {
- _limit = _parent->_limit_per_scanner;
- _tablet_reader_params.read_orderby_key = true;
- if (!olap_scan_node.sort_info.is_asc_order[0]) {
- _tablet_reader_params.read_orderby_key_reverse = true;
+ if (!_state->skip_storage_engine_merge()) {
+ TOlapScanNode& olap_scan_node =
((NewOlapScanNode*)_parent)->_olap_scan_node;
+ if (olap_scan_node.__isset.sort_info &&
olap_scan_node.sort_info.is_asc_order.size() > 0) {
+ _limit = _parent->_limit_per_scanner;
+ _tablet_reader_params.read_orderby_key = true;
+ if (!olap_scan_node.sort_info.is_asc_order[0]) {
+ _tablet_reader_params.read_orderby_key_reverse = true;
+ }
+ _tablet_reader_params.read_orderby_key_num_prefix_columns =
+ olap_scan_node.sort_info.is_asc_order.size();
}
- _tablet_reader_params.read_orderby_key_num_prefix_columns =
- olap_scan_node.sort_info.is_asc_order.size();
}
return Status::OK();
diff --git a/be/src/vec/exec/volap_scanner.cpp
b/be/src/vec/exec/volap_scanner.cpp
index 4b820aa1ce..4157bdb89d 100644
--- a/be/src/vec/exec/volap_scanner.cpp
+++ b/be/src/vec/exec/volap_scanner.cpp
@@ -173,6 +173,7 @@ Status VOlapScanner::_init_tablet_reader_params(
if (_runtime_state->skip_storage_engine_merge()) {
_tablet_reader_params.direct_mode = true;
+ _aggregation = true;
} else {
_tablet_reader_params.direct_mode = _aggregation || single_version;
}
@@ -257,15 +258,17 @@ Status VOlapScanner::_init_tablet_reader_params(
_tablet_reader_params.delete_bitmap =
&_tablet->tablet_meta()->delete_bitmap();
}
- if (_parent->_olap_scan_node.__isset.sort_info &&
- _parent->_olap_scan_node.sort_info.is_asc_order.size() > 0) {
- _limit = _parent->_limit_per_scanner;
- _tablet_reader_params.read_orderby_key = true;
- if (!_parent->_olap_scan_node.sort_info.is_asc_order[0]) {
- _tablet_reader_params.read_orderby_key_reverse = true;
+ if (!_runtime_state->skip_storage_engine_merge()) {
+ if (_parent->_olap_scan_node.__isset.sort_info &&
+ _parent->_olap_scan_node.sort_info.is_asc_order.size() > 0) {
+ _limit = _parent->_limit_per_scanner;
+ _tablet_reader_params.read_orderby_key = true;
+ if (!_parent->_olap_scan_node.sort_info.is_asc_order[0]) {
+ _tablet_reader_params.read_orderby_key_reverse = true;
+ }
+ _tablet_reader_params.read_orderby_key_num_prefix_columns =
+ _parent->_olap_scan_node.sort_info.is_asc_order.size();
}
- _tablet_reader_params.read_orderby_key_num_prefix_columns =
- _parent->_olap_scan_node.sort_info.is_asc_order.size();
}
return Status::OK();
diff --git
a/regression-test/data/data_model_p0/unique/test_unique_table_debug_data.out
b/regression-test/data/data_model_p0/unique/test_unique_table_debug_data.out
new file mode 100644
index 0000000000..f055dd748b
--- /dev/null
+++ b/regression-test/data/data_model_p0/unique/test_unique_table_debug_data.out
@@ -0,0 +1,34 @@
+-- This file is automatically generated. You should know what you did if you
want to edit this
+-- !select_init --
+1 11
+2 11
+3 1
+
+-- !select_skip_merge --
+1 1 0
+1 11 0
+2 1 0
+2 11 0
+3 1 0
+
+-- !select_batch_delete --
+2 11
+3 1
+
+-- !select_sql_delete --
+3 1
+
+-- !select_skip_merge_after_delete --
+1 1 0
+1 11 0
+1 111 1
+3 1 0
+
+-- !select_skip_delete --
+1 1 0
+1 11 0
+1 111 1
+2 1 0
+2 11 0
+3 1 0
+
diff --git
a/regression-test/data/data_model_p0/unique/test_unique_table_debug_data_delete.csv
b/regression-test/data/data_model_p0/unique/test_unique_table_debug_data_delete.csv
new file mode 100644
index 0000000000..1c19a02704
--- /dev/null
+++
b/regression-test/data/data_model_p0/unique/test_unique_table_debug_data_delete.csv
@@ -0,0 +1 @@
+1|111
diff --git
a/regression-test/suites/data_model_p0/unique/test_unique_table_debug_data.groovy
b/regression-test/suites/data_model_p0/unique/test_unique_table_debug_data.groovy
new file mode 100644
index 0000000000..5b90a0dc4f
--- /dev/null
+++
b/regression-test/suites/data_model_p0/unique/test_unique_table_debug_data.groovy
@@ -0,0 +1,88 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_unique_table_debug_data") {
+
+ sql "ADMIN SET FRONTEND CONFIG ('enable_batch_delete_by_default' = 'true')"
+ sql "SET show_hidden_columns=false"
+ sql "SET skip_delete_predicate=false"
+ sql "SET skip_storage_engine_merge=false"
+
+ def tbName = "test_unique_table_debug_data"
+ sql "DROP TABLE IF EXISTS ${tbName}"
+ sql """
+ CREATE TABLE IF NOT EXISTS ${tbName} (
+ a int, b int
+ )
+ unique key (a)
+ distributed by hash(a) buckets 16
+ properties(
+ "replication_allocation" = "tag.location.default:1",
+ "disable_auto_compaction" = "false"
+ );
+ """
+
+
//BackendId,Cluster,IP,HeartbeatPort,BePort,HttpPort,BrpcPort,LastStartTime,LastHeartbeat,Alive,SystemDecommissioned,ClusterDecommissioned,TabletNum,DataUsedCapacity,AvailCapacity,TotalCapacity,UsedPct,MaxDiskUsedPct,Tag,ErrMsg,Version,Status
+ String[][] backends = sql """ show backends; """
+ assertTrue(backends.size() > 0)
+ StringBuilder sbCommand = new StringBuilder();
+
+ sql "insert into ${tbName} values(1,1),(2,1);"
+ sql "insert into ${tbName} values(1,11),(2,11);"
+ sql "insert into ${tbName} values(3,1);"
+
+ qt_select_init "select * from ${tbName} order by a, b"
+
+ // enable skip_storage_engine_merge and check select result;
+ // the original, unmerged rows are returned:
+ sql "SET skip_storage_engine_merge=true"
+ qt_select_skip_merge "select * from ${tbName} order by a, b"
+
+ // turn off skip_storage_engine_merge
+ sql "SET skip_storage_engine_merge=false"
+
+ // batch delete and select again:
+ // curl --location-trusted -uroot: -H "column_separator:|" -H "columns:a,
b" -H "merge_type: delete" -T delete.csv
http://127.0.0.1:8030/api/test_skip/t1/_stream_load
+ streamLoad {
+ table "${tbName}"
+
+ set 'column_separator', '|'
+ set 'columns', 'a, b'
+ set 'merge_type', 'delete'
+
+ file 'test_unique_table_debug_data_delete.csv'
+
+ time 10000 // limit inflight 10s
+ }
+ qt_select_batch_delete "select * from ${tbName} order by a, b"
+
+ // delete rows with a = 2:
+ sql "delete from ${tbName} where a = 2;"
+ qt_select_sql_delete "select * from ${tbName} order by a, b"
+
+ // enable skip_storage_engine_merge and select; rows deleted with the delete
statement are not returned:
+ sql "SET skip_storage_engine_merge=true"
+ qt_select_skip_merge_after_delete "select * from ${tbName} order by a, b"
+
+ // enable skip_delete_predicate; rows deleted with the delete statement are
also returned:
+ sql "SET skip_delete_predicate=true"
+ qt_select_skip_delete "select * from ${tbName} order by a, b"
+
+ sql "DROP TABLE ${tbName}"
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]