This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
     new 2cd3bf80dc [bugfix](schema change)fix core dump on vectorized_alter_table (#11538)
2cd3bf80dc is described below
commit 2cd3bf80dcb3a4418228e97cb79b9673b7828cb2
Author: Pxl <[email protected]>
AuthorDate: Mon Aug 8 10:45:28 2022 +0800
[bugfix](schema change)fix core dump on vectorized_alter_table (#11538)
---
 be/src/common/config.h                                 |  2 +-
 be/src/olap/schema_change.cpp                          | 19 +------
 be/src/vec/exec/volap_scanner.cpp                      | 16 +++---
 .../data/rollup/test_materialized_view_hll.out         | Bin 0 -> 451 bytes
 .../rollup/test_materialized_view_hll.groovy           | 63 +++++++++++++++++++++
 5 files changed, 75 insertions(+), 25 deletions(-)
diff --git a/be/src/common/config.h b/be/src/common/config.h
index dbeebf41bc..258000e540 100644
--- a/be/src/common/config.h
+++ b/be/src/common/config.h
@@ -252,7 +252,7 @@ CONF_Bool(enable_low_cardinality_optimize, "true");
CONF_mBool(disable_auto_compaction, "false");
// whether enable vectorized compaction
CONF_Bool(enable_vectorized_compaction, "true");
-// whether enable vectorized schema change, material-view or rollup task will fail if this config open.
+// whether enable vectorized schema change/material-view/rollup task.
CONF_Bool(enable_vectorized_alter_table, "false");
// check the configuration of auto compaction in seconds when auto compaction disabled
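
For context on the config.h hunk above: the old comment warned that materialized-view and rollup tasks would fail whenever this flag was enabled; with this fix that caveat is dropped, although enable_vectorized_alter_table still defaults to "false". The snippet below is only a minimal sketch of how such a CONF_Bool is typically consulted on the BE side; the flag name comes from this diff, but the helper function and output are hypothetical, not the actual selection code in be/src/olap/schema_change.cpp.

    // Minimal sketch: CONF_Bool values are exposed through the config namespace
    // and consulted when an alter-table / rollup / materialized-view task runs.
    #include <iostream>

    namespace config {
    bool enable_vectorized_alter_table = false;  // default per be/src/common/config.h
    }

    // Hypothetical helper; the real branch lives in the schema-change code.
    void run_alter_task() {
        if (config::enable_vectorized_alter_table) {
            std::cout << "vectorized schema change path\n";  // e.g. VSchemaChangeWithSorting
        } else {
            std::cout << "row-based schema change path\n";   // e.g. SchemaChangeWithSorting
        }
    }

    int main() {
        run_alter_task();
        return 0;
    }
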
diff --git a/be/src/olap/schema_change.cpp b/be/src/olap/schema_change.cpp
index 6dc20fc6c5..7dfdc36117 100644
--- a/be/src/olap/schema_change.cpp
+++ b/be/src/olap/schema_change.cpp
@@ -817,7 +817,7 @@ Status RowBlockChanger::change_block(vectorized::Block* ref_block,
vectorized::VExprContext* ctx = nullptr;
RETURN_IF_ERROR(
                    vectorized::VExpr::create_expr_tree(&pool, *_schema_mapping[idx].expr, &ctx));
-
+ Defer defer {[&]() { ctx->close(state); }};
RETURN_IF_ERROR(ctx->prepare(state, row_desc));
RETURN_IF_ERROR(ctx->open(state));
@@ -834,8 +834,6 @@ Status RowBlockChanger::change_block(vectorized::Block* ref_block,
ref_block->get_by_position(result_column_id).column));
}
swap_idx_map[result_column_id] = idx;
-
- ctx->close(state);
} else {
// same type, just swap column
swap_idx_map[ref_idx] = idx;
@@ -1632,15 +1630,10 @@ bool SchemaChangeWithSorting::_external_sorting(vector<RowsetSharedPtr>& src_row
}
rs_readers.push_back(rs_reader);
}
-    // get cur schema if rowset schema exist, rowset schema must be newer than tablet schema
-    TabletSchemaSPtr cur_tablet_schema = src_rowsets.back()->rowset_meta()->tablet_schema();
- if (cur_tablet_schema == nullptr) {
- cur_tablet_schema = new_tablet->tablet_schema();
- }
Merger::Statistics stats;
-    auto res = Merger::merge_rowsets(new_tablet, READER_ALTER_TABLE, cur_tablet_schema, rs_readers,
-                                     rowset_writer, &stats);
+    auto res = Merger::merge_rowsets(new_tablet, READER_ALTER_TABLE, new_tablet->tablet_schema(),
+                                     rs_readers, rowset_writer, &stats);
if (!res) {
        LOG(WARNING) << "failed to merge rowsets. tablet=" << new_tablet->full_name()
<< ", version=" << rowset_writer->version().first << "-"
@@ -1662,12 +1655,6 @@ Status VSchemaChangeWithSorting::_external_sorting(vector<RowsetSharedPtr>& src_
rs_readers.push_back(rs_reader);
}
-    // get cur schema if rowset schema exist, rowset schema must be newer than tablet schema
-    auto cur_tablet_schema = src_rowsets.back()->rowset_meta()->tablet_schema();
- if (cur_tablet_schema == nullptr) {
- cur_tablet_schema = new_tablet->tablet_schema();
- }
-
Merger::Statistics stats;
RETURN_IF_ERROR(Merger::vmerge_rowsets(new_tablet, READER_ALTER_TABLE,
                                           new_tablet->tablet_schema(), rs_readers, rowset_writer,
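
For context on the schema_change.cpp hunks above: the per-column VExprContext is now closed through a Defer scope guard, so ctx->close(state) also runs when prepare/open or a later step bails out early via RETURN_IF_ERROR, not only on the success path; and both sorting paths now merge with new_tablet->tablet_schema() instead of the last source rowset's schema. Below is a minimal standalone sketch of the scope-guard idea only; Doris ships its own Defer helper in the BE, and this toy version merely mirrors the concept.

    // Toy scope guard: the callback runs on every exit path of the scope.
    #include <functional>
    #include <iostream>

    class Defer {
    public:
        explicit Defer(std::function<void()> fn) : _fn(std::move(fn)) {}
        ~Defer() { _fn(); }

    private:
        std::function<void()> _fn;
    };

    bool prepare_fails() { return true; }  // stand-in for a failing RETURN_IF_ERROR

    void change_one_column() {
        Defer defer{[]() { std::cout << "ctx->close(state) equivalent ran\n"; }};
        if (prepare_fails()) {
            return;  // before the fix, an early return like this skipped ctx->close()
        }
        std::cout << "expression evaluated\n";
    }

    int main() {
        change_one_column();
        return 0;
    }
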
diff --git a/be/src/vec/exec/volap_scanner.cpp b/be/src/vec/exec/volap_scanner.cpp
index f7ca037e31..c032aee301 100644
--- a/be/src/vec/exec/volap_scanner.cpp
+++ b/be/src/vec/exec/volap_scanner.cpp
@@ -104,7 +104,7 @@ Status VOlapScanner::prepare(
ss << "failed to initialize storage reader. tablet=" <<
_tablet->full_name()
<< ", res=" << acquire_reader_st
<< ", backend=" << BackendOptions::get_localhost();
- return Status::InternalError(ss.str().c_str());
+ return Status::InternalError(ss.str());
}
}
}
@@ -134,7 +134,7 @@ Status VOlapScanner::open() {
ss << "failed to initialize storage reader. tablet="
<< _tablet_reader_params.tablet->full_name() << ", res=" << res
<< ", backend=" << BackendOptions::get_localhost();
- return Status::InternalError(ss.str().c_str());
+ return Status::InternalError(ss.str());
}
return Status::OK();
}
@@ -255,11 +255,9 @@ Status VOlapScanner::_init_return_columns(bool need_seq_col) {
continue;
}
- int32_t index = _tablet_schema->field_index(slot->col_unique_id());
- if (index < 0) {
- // rollup/materialized view should use col_name to find index
- index = _tablet_schema->field_index(slot->col_name());
- }
+        int32_t index = slot->col_unique_id() >= 0
+                                ? _tablet_schema->field_index(slot->col_unique_id())
+                                : _tablet_schema->field_index(slot->col_name());
if (index < 0) {
std::stringstream ss;
@@ -352,7 +350,9 @@ Status VOlapScanner::close(RuntimeState* state) {
if (_is_closed) {
return Status::OK();
}
- if (_vconjunct_ctx) _vconjunct_ctx->close(state);
+ if (_vconjunct_ctx) {
+ _vconjunct_ctx->close(state);
+ }
// olap scan node will call scanner.close() when finished
// will release resources here
// if not clear rowset readers in read_params here
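
For context on the _init_return_columns hunk above: the old code always tried the unique-id lookup first and fell back to a name lookup on a miss; the new code only uses the unique id when the slot actually has one (col_unique_id() >= 0) and sends rollup/materialized-view slots, which carry no unique id, straight to the name lookup. A standalone sketch of that selection rule follows; Slot and Schema here are stand-ins for illustration, not the real Doris types.

    // Stand-in types; only the selection rule mirrors the diff above.
    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <string>

    struct Slot {
        std::int32_t col_unique_id;  // -1 for rollup / materialized-view slots
        std::string col_name;
    };

    struct Schema {
        std::map<std::int32_t, std::int32_t> by_unique_id;
        std::map<std::string, std::int32_t> by_name;

        std::int32_t field_index(std::int32_t uid) const {
            auto it = by_unique_id.find(uid);
            return it == by_unique_id.end() ? -1 : it->second;
        }
        std::int32_t field_index(const std::string& name) const {
            auto it = by_name.find(name);
            return it == by_name.end() ? -1 : it->second;
        }
    };

    std::int32_t resolve_index(const Schema& schema, const Slot& slot) {
        // Only trust the unique id when the slot actually has one.
        return slot.col_unique_id >= 0 ? schema.field_index(slot.col_unique_id)
                                       : schema.field_index(slot.col_name);
    }

    int main() {
        Schema schema{{{10, 0}}, {{"mv_store_id", 1}}};
        assert(resolve_index(schema, Slot{10, "store_id"}) == 0);     // base column by unique id
        assert(resolve_index(schema, Slot{-1, "mv_store_id"}) == 1);  // MV column by name
        return 0;
    }
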
diff --git a/regression-test/data/rollup/test_materialized_view_hll.out b/regression-test/data/rollup/test_materialized_view_hll.out
new file mode 100644
index 0000000000..5d792bd68b
Binary files /dev/null and b/regression-test/data/rollup/test_materialized_view_hll.out differ
diff --git a/regression-test/suites/rollup/test_materialized_view_hll.groovy b/regression-test/suites/rollup/test_materialized_view_hll.groovy
new file mode 100644
index 0000000000..fdf57900f0
--- /dev/null
+++ b/regression-test/suites/rollup/test_materialized_view_hll.groovy
@@ -0,0 +1,63 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+suite("test_materialized_view_hll", "rollup") {
+ def tbName1 = "test_materialized_view_hll"
+
+ def getJobState = { tableName ->
+        def jobStateResult = sql """ SHOW ALTER TABLE MATERIALIZED VIEW WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1; """
+ return jobStateResult[0][8]
+ }
+ sql "DROP TABLE IF EXISTS ${tbName1}"
+ sql """
+ CREATE TABLE ${tbName1}(
+ record_id int,
+ seller_id int,
+ store_id int,
+ sale_date date,
+ sale_amt bigint
+ )
+ DISTRIBUTED BY HASH(record_id) properties("replication_num" = "1");
+ """
+
+ sql "CREATE materialized VIEW amt_count AS SELECT store_id,
hll_union(hll_hash(sale_amt)) FROM ${tbName1} GROUP BY store_id;"
+ max_try_secs = 60
+ while (max_try_secs--) {
+ String res = getJobState(tbName1)
+ if (res == "FINISHED") {
+ break
+ } else {
+ Thread.sleep(2000)
+ if (max_try_secs < 1) {
+ println "test timeout," + "state:" + res
+ assertEquals("FINISHED",res)
+ }
+ }
+ }
+
+ qt_sql "DESC ${tbName1} ALL;"
+
+ sql "insert into ${tbName1} values(1, 1, 1, '2020-05-30',100);"
+ sql "insert into ${tbName1} values(2, 1, 1, '2020-05-30',100);"
+ qt_sql "SELECT store_id, hll_union_agg(hll_hash(sale_amt)) FROM ${tbName1}
GROUP BY store_id;"
+
+ explain {
+ sql("SELECT store_id, hll_union_agg(hll_hash(sale_amt)) FROM
${tbName1} GROUP BY store_id;")
+ contains "(amt_count)"
+ }
+
+ sql "DROP TABLE ${tbName1} FORCE;"
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]