This is an automated email from the ASF dual-hosted git repository.
kxiao pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-2.0 by this push:
new 0fd000d3f0f [Fix](schema change) fix metadata inconsistency bug caused by enable light schema change (#28255)
0fd000d3f0f is described below
commit 0fd000d3f0f0f39f178a1ac06951956fbb51feb3
Author: Luwei <[email protected]>
AuthorDate: Thu Dec 14 20:54:48 2023 +0800
[Fix](schema change) fix metadata inconsistency bug caused by enable light schema change (#28255)
---
be/src/service/internal_service.cpp | 31 +++-
.../test_enable_light_schema_change.groovy | 156 +++++++++++++++++++++
2 files changed, 186 insertions(+), 1 deletion(-)
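
The BE change below adds a per-column comparison next to the existing column-id set check: the first tablet's columns seed a map keyed by unique column id, and every subsequent tablet's columns are checked against that map, so a column whose id matches but whose definition differs is reported with both schemas instead of passing silently. The following standalone C++ sketch condenses that idea for readers skimming the diff; the Column struct and check_consistency() helper are illustrative stand-ins, not the actual Doris TabletColumn/ColumnPB types used in internal_service.cpp.

    // Minimal sketch of the map-based consistency check this patch introduces.
    // "Column" is a toy stand-in for Doris' TabletColumn; only the idea
    // (seed a map from the first tablet, compare every later tablet against it)
    // mirrors the real code.
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    struct Column {
        int32_t unique_id;
        std::string name;
        std::string type;
        bool operator!=(const Column& other) const {
            return unique_id != other.unique_id || name != other.name || type != other.type;
        }
    };

    // Returns true if every tablet's columns match the first tablet's columns
    // (keyed by unique_id); reports the first mismatch it finds.
    bool check_consistency(const std::vector<std::vector<Column>>& tablets) {
        std::map<int32_t, const Column*> id_to_column;
        for (const auto& columns : tablets) {
            if (id_to_column.empty()) {
                // First tablet seen: record its columns as the reference schema.
                for (const auto& col : columns) {
                    id_to_column.emplace(col.unique_id, &col);
                }
                continue;
            }
            // Later tablets: every column must exist in the reference map
            // and match the reference definition exactly.
            for (const auto& col : columns) {
                auto it = id_to_column.find(col.unique_id);
                if (it == id_to_column.end() || *(it->second) != col) {
                    std::cerr << "inconsistent schema for column id " << col.unique_id << "\n";
                    return false;
                }
            }
        }
        return true;
    }

    int main() {
        // Same column id (2) but a different definition on the second tablet.
        std::vector<std::vector<Column>> tablets = {
            {{1, "k1", "DATE"}, {2, "k2", "INT"}},
            {{1, "k1", "DATE"}, {2, "k2", "BIGINT"}},
        };
        std::cout << (check_consistency(tablets) ? "consistent" : "inconsistent") << "\n";
        return 0;
    }
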
diff --git a/be/src/service/internal_service.cpp b/be/src/service/internal_service.cpp
index df88be44ba7..7a9abe77159 100644
--- a/be/src/service/internal_service.cpp
+++ b/be/src/service/internal_service.cpp
@@ -707,6 +707,7 @@ void PInternalServiceImpl::_get_column_ids_by_tablet_ids(
int64_t index_id = param.indexid();
auto tablet_ids = param.tablet_ids();
std::set<std::set<int32_t>> filter_set;
+ std::map<int32_t, const TabletColumn*> id_to_column;
for (const int64_t tablet_id : tablet_ids) {
TabletSharedPtr tablet = tablet_mgr->get_tablet(tablet_id);
if (tablet == nullptr) {
@@ -719,17 +720,45 @@ void PInternalServiceImpl::_get_column_ids_by_tablet_ids(
}
// check schema consistency, column ids should be the same
const auto& columns = tablet->tablet_schema()->columns();
+
std::set<int32_t> column_ids;
for (const auto& col : columns) {
column_ids.insert(col.unique_id());
}
filter_set.insert(column_ids);
+
+ if (id_to_column.empty()) {
+ for (const auto& col : columns) {
+ id_to_column.insert(std::pair {col.unique_id(), &col});
+ }
+ } else {
+ for (const auto& col : columns) {
+ auto it = id_to_column.find(col.unique_id());
+ if (it == id_to_column.end() || *(it->second) != col) {
+ ColumnPB prev_col_pb;
+ ColumnPB curr_col_pb;
+ if (it != id_to_column.end()) {
+ it->second->to_schema_pb(&prev_col_pb);
+ }
+ col.to_schema_pb(&curr_col_pb);
+ std::stringstream ss;
+ ss << "consistency check failed: index{ " << index_id << " }"
+ << " got inconsistent schema, prev column: " << prev_col_pb.DebugString()
+ << " current column: " << curr_col_pb.DebugString();
+ LOG(WARNING) << ss.str();
+ response->mutable_status()->set_status_code(TStatusCode::ILLEGAL_STATE);
+ response->mutable_status()->add_error_msgs(ss.str());
+ return;
+ }
+ }
+ }
}
+
if (filter_set.size() > 1) {
// consistency check failed
std::stringstream ss;
ss << "consistency check failed: index{" << index_id << "}"
- << "got inconsistent shema";
+ << "got inconsistent schema";
LOG(WARNING) << ss.str();
response->mutable_status()->set_status_code(TStatusCode::ILLEGAL_STATE);
response->mutable_status()->add_error_msgs(ss.str());
diff --git a/regression-test/suites/schema_change_p0/test_enable_light_schema_change.groovy b/regression-test/suites/schema_change_p0/test_enable_light_schema_change.groovy
new file mode 100644
index 00000000000..0c09af5e5de
--- /dev/null
+++ b/regression-test/suites/schema_change_p0/test_enable_light_schema_change.groovy
@@ -0,0 +1,156 @@
@@ -0,0 +1,156 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.Date
+import java.text.SimpleDateFormat
+import org.apache.http.HttpResponse
+import org.apache.http.client.methods.HttpPut
+import org.apache.http.impl.client.CloseableHttpClient
+import org.apache.http.impl.client.HttpClients
+import org.apache.http.entity.ContentType
+import org.apache.http.entity.StringEntity
+import org.apache.http.client.config.RequestConfig
+import org.apache.http.client.RedirectStrategy
+import org.apache.http.protocol.HttpContext
+import org.apache.http.HttpRequest
+import org.apache.http.impl.client.LaxRedirectStrategy
+import org.apache.http.client.methods.RequestBuilder
+import org.apache.http.entity.StringEntity
+import org.apache.http.client.methods.CloseableHttpResponse
+import org.apache.http.util.EntityUtils
+
+suite("test_enable_light_schema_change", "p0") {
+ def tableName1 = "test_enable_lsc"
+ def tableName2 = "test_enable_lsc_normal"
+
+ def getJobState = { tableName ->
+ def jobStateResult = sql """ SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
+ return jobStateResult[0][9]
+ }
+
+ def getCreateViewState = { tableName ->
+ def createViewStateResult = sql """ SHOW ALTER TABLE MATERIALIZED VIEW WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
+ return createViewStateResult[0][8]
+ }
+
+ sql """ DROP TABLE IF EXISTS ${tableName1} """
+
+ sql """
+ CREATE TABLE ${tableName1}
+ (
+ k1 DATE,
+ k2 int NOT NULL DEFAULT "1",
+ k3 CHAR(10) COMMENT "string column",
+ k4 INT NOT NULL DEFAULT "1" COMMENT "int column"
+ )
+ unique KEY(k1, k2)
+ COMMENT "my first table"
+ PARTITION BY RANGE(k1)
+ (
+ PARTITION p1 VALUES LESS THAN ("2020-02-01"),
+ PARTITION p2 VALUES LESS THAN ("2020-03-01"),
+ PARTITION p3 VALUES LESS THAN ("2020-04-01")
+ )
+ DISTRIBUTED BY HASH(k1) BUCKETS 1
+ PROPERTIES (
+ "replication_num" = "1",
+ "light_schema_change" = "false"
+ );
+ """
+
+ sql """ insert into ${tableName1} values ('2020-02-10', 2, 'a', 4) """
+
+ sql """ alter table ${tableName1} order by (k1, k2, k4, k3) """
+
+ max_try_num = 60
+ while (max_try_num--) {
+ String res = getJobState(tableName1)
+ if (res == "FINISHED" || res == "CANCELLED") {
+ assertEquals("FINISHED", res)
+ sleep(3000)
+ break
+ } else {
+ sleep(1000)
+ if (max_try_num < 1) {
+ println "test timeout," + "state:" + res
+ assertEquals("FINISHED",res)
+ }
+ }
+ }
+
+ sql """ alter table ${tableName1} ADD PARTITION p4 VALUES LESS THAN
("2020-05-01") """
+ sql """ insert into ${tableName1} values ('2020-04-10', 2, 5, 'b') """
+
+ test {
+ sql """ alter table ${tableName1} set ("light_schema_change"="true")
"""
+ exception "errCode = 2, detailMessage = failed to enable light schema
change for table"
+ }
+
+ sql """ select * from ${tableName1} """
+
+ sql """ DROP TABLE IF EXISTS ${tableName2} """
+ sql """
+ CREATE TABLE ${tableName2}
+ (
+ k1 DATE,
+ k2 int NOT NULL DEFAULT "1",
+ k3 CHAR(10) COMMENT "string column",
+ k4 INT NOT NULL DEFAULT "1" COMMENT "int column"
+ )
+ duplicate KEY(k1, k2)
+ COMMENT "my first table"
+ PARTITION BY RANGE(k1)
+ (
+ PARTITION p1 VALUES LESS THAN ("2020-02-01"),
+ PARTITION p2 VALUES LESS THAN ("2020-03-01"),
+ PARTITION p3 VALUES LESS THAN ("2020-04-01")
+ )
+ DISTRIBUTED BY HASH(k1) BUCKETS 1
+ PROPERTIES (
+ "replication_num" = "1",
+ "light_schema_change" = "false"
+ );
+ """
+
+ sql """ insert into ${tableName2} values ('2020-02-10', 2, 'a', 4) """
+
+ sql """ alter table ${tableName2} ADD COLUMN k5 string after k4"""
+
+ max_try_num = 60
+ while (max_try_num--) {
+ String res = getJobState(tableName2)
+ if (res == "FINISHED" || res == "CANCELLED") {
+ assertEquals("FINISHED", res)
+ sleep(3000)
+ break
+ } else {
+ sleep(1000)
+ if (max_try_num < 1) {
+ println "test timeout," + "state:" + res
+ assertEquals("FINISHED",res)
+ }
+ }
+ }
+
+ sql """ alter table ${tableName2} ADD PARTITION p4 VALUES LESS THAN
("2020-05-01") """
+ sql """ insert into ${tableName2} values ('2020-04-10', 2, 'b', 5, 'test')
"""
+
+ sql """ alter table ${tableName2} set ("light_schema_change"="true") """
+
+ sql """ select * from ${tableName2} """
+}
+
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]