This is an automated email from the ASF dual-hosted git repository.
dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-3.0 by this push:
new aca7e375070 branch-3.0: [test]add test for restore schema not consist #49849 (#50069)
aca7e375070 is described below
commit aca7e375070066cd78078e223bbae4ae288a79e5
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Wed Apr 30 17:17:41 2025 +0800
branch-3.0: [test]add test for restore schema not consist #49849 (#50069)
Cherry-picked from #49849
Co-authored-by: koarz <[email protected]>
---
.../apache/doris/regression/suite/Syncer.groovy | 36 +++++-
.../test_backup_restore_schema_not_consist.groovy | 140 +++++++++++++++++++++
2 files changed, 173 insertions(+), 3 deletions(-)
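For orientation before the diff: the patch adds two Syncer helpers, checkRestoreError and waitRestoreError, plus a suite that restores a snapshot onto tables recreated with a different schema. Below is a minimal sketch (not part of the patch) of the intended call pattern from a suite, assuming the usual regression-test context (getSyncer() is available and a RESTORE like the one in the new suite has already been submitted); the database name is a placeholder:

    def syncer = getSyncer()
    // Blocks until SHOW RESTORE reports an error containing the given text,
    // polling every 3 seconds and giving up (throwing) after 30 minutes.
    syncer.waitRestoreError("example_db", "already exist but with different schema")
    // The non-blocking variant returns true once the error column of
    // SHOW RESTORE is no longer "[OK]" and contains the expected message.
    assert syncer.checkRestoreError("example_db", "already exist but with different schema")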
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Syncer.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Syncer.groovy
index 2195e7e745a..8f6a66d50eb 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Syncer.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Syncer.groovy
@@ -379,7 +379,7 @@ class Syncer {
String checkSQL = "SHOW BACKUP FROM ${dbName}"
def records = suite.sql(checkSQL)
def allDone = true
- for (row in records) {
+ for (def row in records) {
logger.info("BACKUP row is ${row}")
String state = (row[3] as String);
if (state != "FINISHED" && state != "CANCELLED") {
@@ -402,7 +402,7 @@ class Syncer {
String getSnapshotTimestamp(String repoName, String snapshotName) {
def filterShowSnapshot = { records, name ->
- for (row in records) {
+ for (def row in records) {
logger.info("Snapshot row is ${row}")
if (row[0] == name && row[1] != "null") {
return row
@@ -429,7 +429,7 @@ class Syncer {
String checkSQL = "SHOW RESTORE FROM ${dbName}"
def records = suite.sql(checkSQL)
def allDone = true
- for (row in records) {
+ for (def row in records) {
logger.info("Restore row is ${row}")
String state = row[4]
if (state != "FINISHED" && state != "CANCELLED") {
@@ -439,6 +439,36 @@ class Syncer {
allDone
}
+ Boolean checkRestoreError(String dbName = null, String message = null) {
+ if (dbName == null) {
+ dbName = context.db
+ }
+ String checkSQL = "SHOW RESTORE FROM ${dbName}"
+ def records = suite.sql(checkSQL)
+ def haveError = false
+ def expectMessage = (message == null)
+ for (def row in records) {
+ logger.info("Restore row is ${row}")
+ String mess = row[19]
+ haveError = mess != "[OK]"
+ if (haveError && message != null) {
+ expectMessage = mess.contains(message)
+ }
+ }
+ (haveError && expectMessage)
+ }
+
+ void waitRestoreError(String dbName = null, String message = null) {
+ int count = 0;
+ while (!checkRestoreError(dbName, message)) {
+ if (++count >= 600) { // 30min
+ logger.error('RESTORE task timed out')
+ throw new Exception("RESTORE task timed out after 30 mins")
+ }
+ Thread.sleep(3000)
+ }
+ }
+
void waitAllRestoreFinish(String dbName = null) {
int count = 0;
while (!checkAllRestoreFinish(dbName)) {
diff --git a/regression-test/suites/backup_restore/test_backup_restore_schema_not_consist.groovy b/regression-test/suites/backup_restore/test_backup_restore_schema_not_consist.groovy
new file mode 100644
index 00000000000..87df0695839
--- /dev/null
+++ b/regression-test/suites/backup_restore/test_backup_restore_schema_not_consist.groovy
@@ -0,0 +1,140 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.apache.doris.regression.suite.ClusterOptions
+
+suite('test_backup_restore_atomic_schema_not_consist', 'docker') {
+ String suiteName = "test_backup_restore_atomic_schema_not_consist"
+ String repoName = "${suiteName}_repo"
+ String dbName = "${suiteName}_db"
+ String snapshotName = "${suiteName}_snapshot_" + UUID.randomUUID().toString().replace('-', '')
+ def exist = { res -> Boolean
+ return res.size() != 0
+ }
+ def isNewSchema = { res -> Boolean
+ return res[0][1].contains("`k1` int NULL")
+ }
+
+ def options = new ClusterOptions()
+ // contains 3 frontends
+ options.feNum = 3
+ // contains 3 backends
+ options.beNum = 3
+ // each backend has 1 HDD disk and 3 SSD disks
+ options.beDisks = ['HDD=1', 'SSD=3']
+
+ docker (options) {
+ def syncer = getSyncer()
+ syncer.createS3Repository(repoName)
+ sql "CREATE DATABASE IF NOT EXISTS ${dbName}"
+ sql "DROP TABLE IF EXISTS ${dbName}.t1"
+ sql "DROP TABLE IF EXISTS ${dbName}.t2"
+ sql """
+ CREATE TABLE IF NOT EXISTS ${dbName}.t1(
+ `k` int NULL,
+ `v` int NULL
+ ) ENGINE = OLAP
+ DISTRIBUTED BY HASH(k) BUCKETS 4
+ PROPERTIES (
+ "replication_num" = "3"
+ );
+ """
+ sql """
+ CREATE TABLE IF NOT EXISTS ${dbName}.t2(
+ `k` int NULL,
+ `v` int NULL
+ ) ENGINE = OLAP
+ DISTRIBUTED BY HASH(k) BUCKETS 4
+ PROPERTIES (
+ "replication_num" = "3"
+ );
+ """
+
+ sql """
+ BACKUP SNAPSHOT ${dbName}.${snapshotName}
+ TO `${repoName}`
+ ON (t1, t2)
+ """
+ syncer.waitSnapshotFinish(dbName)
+
+ def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName)
+ assertTrue(snapshot != null)
+
+ sql " drop table ${dbName}.t1 "
+ sql " drop table ${dbName}.t2 "
+
+ sql """
+ CREATE TABLE IF NOT EXISTS ${dbName}.t1(
+ `k1` int NULL,
+ `v1` int NULL
+ ) ENGINE = OLAP
+ DISTRIBUTED BY HASH(k1) BUCKETS 4
+ PROPERTIES (
+ "replication_num" = "3"
+ );
+ """
+ sql """
+ CREATE TABLE IF NOT EXISTS ${dbName}.t2(
+ `k1` int NULL,
+ `v1` int NULL
+ ) ENGINE = OLAP
+ DISTRIBUTED BY HASH(k1) BUCKETS 4
+ PROPERTIES (
+ "replication_num" = "3"
+ );
+ """
+ def res = sql " show create table ${dbName}.t1 "
+ assertTrue(isNewSchema(res))
+ res = sql " show create table ${dbName}.t2 "
+ assertTrue(isNewSchema(res))
+ sql """
+ RESTORE SNAPSHOT ${dbName}.${snapshotName}
+ FROM `${repoName}`
+ ON ( `t1`, `t2`)
+ PROPERTIES
+ (
+ "backup_timestamp" = "${snapshot}",
+ "reserve_replica" = "true",
+ "atomic_restore" = "true"
+ )
+ """
+ syncer.waitRestoreError(dbName, "already exist but with different schema")
+
+ def frontends = cluster.getAllFrontends()
+ for (def fe : frontends) {
+ def feUrl = "jdbc:mysql://${fe.host}:${fe.queryPort}/?useLocalSessionState=false&allowLoadLocalInfile=false"
+ feUrl = context.config.buildUrlWithDb(feUrl, context.dbName)
+ connect('root', '', feUrl) {
+ log.info("connect to ${fe.host}:${fe.queryPort}")
+ sql " use ${dbName} "
+ res = sql " show tables like \"t1\" "
+ assertTrue(exist(res))
+ res = sql " show tables like \"t2\" "
+ assertTrue(exist(res))
+ res = sql " show create table t1 "
+ assertTrue(isNewSchema(res))
+ res = sql " show create table t2 "
+ assertTrue(isNewSchema(res))
+ }
+ }
+
+ sql "DROP TABLE ${dbName}.t1 FORCE"
+ sql "DROP TABLE ${dbName}.t2 FORCE"
+ sql "DROP DATABASE ${dbName} FORCE"
+ sql "DROP REPOSITORY `${repoName}`"
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]