This is an automated email from the ASF dual-hosted git repository.
dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 056e765d2e5 [feature](docker suite) make docker suites run without an external doris cluster (#40787)
056e765d2e5 is described below
commit 056e765d2e5384695df4abb1ecb60e9642fc83a8
Author: yujun <[email protected]>
AuthorDate: Thu Sep 26 22:45:22 2024 +0800
[feature](docker suite) make docker suites run without an external doris cluster (#40787)
1. Make docker suites run without an external doris cluster (see the sketch below):
   a. run a docker suite according to ClusterOptions.cloudMode = true/false;
   b. if ClusterOptions.cloudMode = null, the user can set it on the command line: `sh run-regression-test.sh --run docker_action -runMode=cloud/not_cloud`
2. Refactor database.py and simplify its logic.
3. Add a -v command option to doris compose for debug logging.
4. Make doris compose flush its print buffer.
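A minimal, illustrative Python sketch of the resulting run-mode selection rule (the real logic is Groovy code in Suite.docker() and Config.groovy; the function and names below are ours, not the framework's):

```python
# Hypothetical sketch of the run-mode resolution described above.
def resolve_docker_cloud_mode(suite_cloud_mode, pipeline_run_mode):
    """suite_cloud_mode: ClusterOptions.cloudMode (True/False/None).
    pipeline_run_mode: 'cloud'/'not_cloud' fetched from FE, or 'unknown'."""
    if suite_cloud_mode is None:
        # suite has no preference: follow the pipeline, which must be known
        if pipeline_run_mode == "unknown":
            raise Exception("Bad run mode, cloud or not_cloud is unknown")
        return pipeline_run_mode == "cloud"
    # suite pinned a mode: skip when it contradicts a known pipeline mode
    if suite_cloud_mode and pipeline_run_mode == "not_cloud":
        return None  # skip this docker suite
    if not suite_cloud_mode and pipeline_run_mode == "cloud":
        return None  # skip this docker suite
    return suite_cloud_mode
```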
---
docker/runtime/doris-compose/Dockerfile | 2 +-
docker/runtime/doris-compose/Readme.md | 19 +++
docker/runtime/doris-compose/cluster.py | 6 +-
docker/runtime/doris-compose/command.py | 33 +++---
docker/runtime/doris-compose/database.py | 129 +++++++++------------
docker/runtime/doris-compose/doris-compose.py | 9 +-
.../{requirements.txt => format-code.sh} | 10 +-
docker/runtime/doris-compose/requirements.txt | 1 +
docker/runtime/doris-compose/resource/common.sh | 11 +-
docker/runtime/doris-compose/resource/init_fe.sh | 2 +-
docker/runtime/doris-compose/utils.py | 4 +
.../org/apache/doris/regression/Config.groovy | 69 ++++++++---
.../apache/doris/regression/ConfigOptions.groovy | 10 ++
.../doris/regression/action/ProfileAction.groovy | 4 +-
.../org/apache/doris/regression/suite/Suite.groovy | 29 ++++-
.../doris/regression/suite/SuiteCluster.groovy | 4 -
.../suites/demo_p0/docker_action.groovy | 25 +++-
.../test_abort_txn_by_be_local5.groovy | 1 -
.../test_abort_txn_by_be_local6.groovy | 1 -
.../test_abort_txn_by_fe_local3.groovy | 1 -
run-regression-test.sh | 2 +
21 files changed, 229 insertions(+), 143 deletions(-)
diff --git a/docker/runtime/doris-compose/Dockerfile b/docker/runtime/doris-compose/Dockerfile
index 2aabe196205..48d94d612df 100644
--- a/docker/runtime/doris-compose/Dockerfile
+++ b/docker/runtime/doris-compose/Dockerfile
@@ -38,7 +38,7 @@ RUN sed -i s@/deb.debian.org/@/mirrors.aliyun.com/@g /etc/apt/sources.list
RUN apt-get clean
RUN apt-get update && \
-    apt-get install -y default-mysql-client python lsof tzdata curl unzip patchelf jq procps && \
+    apt-get install -y default-mysql-client python lsof tzdata curl unzip patchelf jq procps util-linux && \
ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
dpkg-reconfigure -f noninteractive tzdata && \
apt-get clean
diff --git a/docker/runtime/doris-compose/Readme.md b/docker/runtime/doris-compose/Readme.md
index 770414f7a2b..c4c4dc0990f 100644
--- a/docker/runtime/doris-compose/Readme.md
+++ b/docker/runtime/doris-compose/Readme.md
@@ -154,3 +154,22 @@ steps:
2. Generate regression-conf-custom.groovy: `python docker/runtime/doris-compose/doris-compose.py config my-cluster <doris-root-path> --connect-follow-fe`
3. Run regression test: `bash run-regression-test.sh --run -times 1 -parallel 1 -suiteParallel 1 -d cloud/multi_cluster`
+## Problem investigation
+
+#### Log
+
+Each cluster keeps its logs in /tmp/doris/{cluster-name}/{node-xxx}/log. For each node, doris compose also writes a health log to /tmp/doris/{cluster-name}/{node-xxx}/log/health.out
+
+#### Bring up a cluster in non-detach mode
+
+```
+python docker/runtime/doris-compose/doris-compose.py up ... -no-detach
+```
+
+## Developer
+
+Before submitting code, please format it.
+
+```
+bash format-code.sh
+```
diff --git a/docker/runtime/doris-compose/cluster.py b/docker/runtime/doris-compose/cluster.py
index 3a2d95ac986..985ef27113b 100644
--- a/docker/runtime/doris-compose/cluster.py
+++ b/docker/runtime/doris-compose/cluster.py
@@ -16,7 +16,6 @@
# under the License.
import filelock
-import json
import jsonpickle
import os
import os.path
@@ -405,11 +404,12 @@ class FE(Node):
if self.cluster.is_cloud:
cfg += [
"meta_service_endpoint = {}".format(
- self.cluster.get_meta_server_addr()), "",
+ self.cluster.get_meta_server_addr()),
+ "",
"# For regression-test",
"ignore_unsupported_properties_in_cloud_mode = true",
"merge_on_write_forced_to_false = true",
- "deploy_mode = cloud"
+ "deploy_mode = cloud",
]
if self.cluster.sql_mode_node_mgr:
diff --git a/docker/runtime/doris-compose/command.py b/docker/runtime/doris-compose/command.py
index 3a5afc714dd..48863003223 100644
--- a/docker/runtime/doris-compose/command.py
+++ b/docker/runtime/doris-compose/command.py
@@ -92,7 +92,12 @@ class Command(object):
def run(self, args):
        raise Exception("Not implemented")
- def _add_parser_output_json(self, parser):
+ def _add_parser_common_args(self, parser):
+ parser.add_argument("-v",
+ "--verbose",
+ default=False,
+ action=self._get_parser_bool_action(True),
+ help="verbose logging.")
parser.add_argument("--output-json",
default=False,
action=self._get_parser_bool_action(True),
@@ -150,7 +155,7 @@ class SimpleCommand(Command):
parser = args_parsers.add_parser(self.command, help=help)
parser.add_argument("NAME", help="Specify cluster name.")
self._add_parser_ids_args(parser)
- self._add_parser_output_json(parser)
+ self._add_parser_common_args(parser)
def run(self, args):
cluster = CLUSTER.Cluster.load(args.NAME)
@@ -180,6 +185,7 @@ class UpCommand(Command):
nargs="?",
help="Specify docker image.")
+ self._add_parser_common_args(parser)
parser.add_argument(
"--cloud",
default=False,
@@ -197,8 +203,6 @@ class UpCommand(Command):
"> 0 max wait seconds, -1 wait unlimited."
)
- self._add_parser_output_json(parser)
-
group1 = parser.add_argument_group("add new nodes",
"add cluster nodes.")
group1.add_argument(
@@ -325,16 +329,14 @@ class UpCommand(Command):
"--be-cluster-id",
default=True,
action=self._get_parser_bool_action(False),
- help=
- "Do not set BE cluster ID in conf. Default is False.")
+ help="Do not set BE cluster ID in conf. Default is False.")
else:
parser.add_argument(
"--no-be-cluster-id",
dest='be_cluster_id',
default=True,
action=self._get_parser_bool_action(False),
- help=
- "Do not set BE cluser ID in conf. Default is False.")
+ help="Do not set BE cluser ID in conf. Default is False.")
parser.add_argument(
"--fdb-version",
@@ -669,7 +671,7 @@ class DownCommand(Command):
"then apply to all containers.")
parser.add_argument("NAME", help="Specify cluster name")
self._add_parser_ids_args(parser)
- self._add_parser_output_json(parser)
+ self._add_parser_common_args(parser)
parser.add_argument(
"--clean",
default=False,
@@ -782,12 +784,9 @@ class ListNode(object):
self.created = ""
self.alive = ""
self.is_master = ""
- self.query_port = ""
self.tablet_num = ""
self.last_heartbeat = ""
self.err_msg = ""
- self.edit_log_port = 0
- self.heartbeat_port = 0
def info(self, detail):
result = [
@@ -825,10 +824,8 @@ class ListNode(object):
if fe:
self.alive = str(fe.alive).lower()
self.is_master = str(fe.is_master).lower()
- self.query_port = fe.query_port
self.last_heartbeat = fe.last_heartbeat
self.err_msg = fe.err_msg
- self.edit_log_port = fe.edit_log_port
elif self.node_type == CLUSTER.Node.TYPE_BE:
self.backend_id = -1
be = db_mgr.get_be(self.id)
@@ -838,7 +835,6 @@ class ListNode(object):
self.tablet_num = be.tablet_num
self.last_heartbeat = be.last_heartbeat
self.err_msg = be.err_msg
- self.heartbeat_port = be.heartbeat_port
class GenConfCommand(Command):
@@ -977,7 +973,7 @@ class ListCommand(Command):
help=
"Specify multiple clusters, if specific, show all their
containers."
)
- self._add_parser_output_json(parser)
+ self._add_parser_common_args(parser)
parser.add_argument("--detail",
default=False,
action=self._get_parser_bool_action(True),
@@ -1021,7 +1017,8 @@ class ListCommand(Command):
if services is None:
return COMPOSE_BAD, {}
return COMPOSE_GOOD, {
- service: ComposeService(
+ service:
+ ComposeService(
service,
list(service_conf["networks"].values())[0]
["ipv4_address"], service_conf["image"])
@@ -1186,7 +1183,7 @@ class GetCloudIniCommand(Command):
help=
"Specify multiple clusters, if specific, show all their
containers."
)
- self._add_parser_output_json(parser)
+ self._add_parser_common_args(parser)
def _handle_data(self, header, datas):
if utils.is_enable_log():
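The renamed _add_parser_common_args helper bundles the new -v flag with the existing --output-json flag, so every subcommand registers both in one place. A self-contained sketch of the pattern, using plain store_true in place of the project's _get_parser_bool_action wrapper (an assumption on our part):

```python
import argparse

def add_common_args(parser):
    # shared flags attached to every doris-compose subcommand
    parser.add_argument("-v", "--verbose", default=False,
                        action="store_true", help="verbose logging.")
    parser.add_argument("--output-json", default=False,
                        action="store_true",
                        help="print the command result as json.")

parser = argparse.ArgumentParser(prog="doris-compose")
add_common_args(parser)
args = parser.parse_args(["-v"])
print(args.verbose, args.output_json)  # True False
```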
diff --git a/docker/runtime/doris-compose/database.py b/docker/runtime/doris-compose/database.py
index bbf6fb4fbeb..46cdd961c9f 100644
--- a/docker/runtime/doris-compose/database.py
+++ b/docker/runtime/doris-compose/database.py
@@ -27,21 +27,18 @@ LOG = utils.get_logger()
class FEState(object):
- def __init__(self, id, query_port, is_master, alive, last_heartbeat,
- err_msg, edit_log_port):
+ def __init__(self, id, is_master, alive, last_heartbeat, err_msg):
self.id = id
- self.query_port = query_port
self.is_master = is_master
self.alive = alive
self.last_heartbeat = last_heartbeat
self.err_msg = err_msg
- self.edit_log_port = edit_log_port
class BEState(object):
def __init__(self, id, backend_id, decommissioned, alive, tablet_num,
- last_heartbeat, err_msg, heartbeat_port):
+ last_heartbeat, err_msg):
self.id = id
self.backend_id = backend_id
self.decommissioned = decommissioned
@@ -49,7 +46,6 @@ class BEState(object):
self.tablet_num = tablet_num
self.last_heartbeat = last_heartbeat
self.err_msg = err_msg
- self.heartbeat_port = heartbeat_port
class DBManager(object):
@@ -57,11 +53,8 @@ class DBManager(object):
def __init__(self):
self.fe_states = {}
self.be_states = {}
- self.query_port = -1
self.conn = None
-
- def set_query_port(self, query_port):
- self.query_port = query_port
+ self.master_fe_ip = ""
def get_fe(self, id):
return self.fe_states.get(id, None)
@@ -69,8 +62,8 @@ class DBManager(object):
def get_be(self, id):
return self.be_states.get(id, None)
- def load_states(self, query_ports):
- self._load_fe_states(query_ports)
+ def load_states(self):
+ self._load_fe_states()
self._load_be_states()
def add_fe(self, fe_endpoint):
@@ -189,108 +182,96 @@ class DBManager(object):
LOG.error(f"Failed to create default storage vault: {str(e)}")
raise
- def _load_fe_states(self, query_ports):
+ def _load_fe_states(self):
fe_states = {}
- alive_master_fe_port = None
- for record in self._exec_query('''
- show frontends '''):
- # Unpack the record into individual columns
-            name, ip, edit_log_port, _, query_port, _, _, role, is_master, cluster_id, _, alive, _, _, last_heartbeat, _, err_msg, _, _ = record
- is_master = utils.is_true(is_master)
- alive = utils.is_true(alive)
+ alive_master_fe_ip = None
+ for record in self._exec_query("show frontends"):
+ name = record["Name"]
+ ip = record["Host"]
+ role = record["Role"]
+ is_master = utils.is_true(record["IsMaster"])
+ alive = utils.is_true(record["Alive"])
id = CLUSTER.Node.get_id_from_ip(ip)
- query_port = query_ports.get(id, "")
- last_heartbeat = utils.escape_null(last_heartbeat)
- fe = FEState(id, query_port, is_master, alive, last_heartbeat,
- err_msg, edit_log_port)
+ last_heartbeat = utils.escape_null(record["LastHeartbeat"])
+ err_msg = record["ErrMsg"]
+ fe = FEState(id, is_master, alive, last_heartbeat, err_msg)
fe_states[id] = fe
- if is_master and alive and query_port:
- alive_master_fe_port = query_port
- LOG.info(
+ if is_master and alive:
+ alive_master_fe_ip = ip
+ LOG.debug(
"record of show frontends, name {}, ip {}, alive {}, is_master
{}, role {}"
.format(name, ip, alive, is_master, role))
self.fe_states = fe_states
- if alive_master_fe_port and alive_master_fe_port != self.query_port:
- self.query_port = alive_master_fe_port
+ if alive_master_fe_ip and alive_master_fe_ip != self.master_fe_ip:
+ self.master_fe_ip = alive_master_fe_ip
self._reset_conn()
def _load_be_states(self):
be_states = {}
- for record in self._exec_query('''
-            select BackendId, Host, LastHeartbeat, Alive, SystemDecommissioned, TabletNum, ErrMsg, HeartbeatPort
-            from backends()'''):
-            backend_id, ip, last_heartbeat, alive, decommissioned, tablet_num, err_msg, heartbeat_port = record
- backend_id = int(backend_id)
- alive = utils.is_true(alive)
- decommissioned = utils.is_true(decommissioned)
- tablet_num = int(tablet_num)
- id = CLUSTER.Node.get_id_from_ip(ip)
- last_heartbeat = utils.escape_null(last_heartbeat)
- heartbeat_port = utils.escape_null(heartbeat_port)
+ for record in self._exec_query("show backends"):
+ backend_id = int(record["BackendId"])
+ alive = utils.is_true(record["Alive"])
+ decommissioned = utils.is_true(record["SystemDecommissioned"])
+ tablet_num = int(record["TabletNum"])
+ id = CLUSTER.Node.get_id_from_ip(record["Host"])
+ last_heartbeat = utils.escape_null(record["LastHeartbeat"])
+ err_msg = record["ErrMsg"]
be = BEState(id, backend_id, decommissioned, alive, tablet_num,
- last_heartbeat, err_msg, heartbeat_port)
+ last_heartbeat, err_msg)
be_states[id] = be
self.be_states = be_states
+ # return rows, and each row is a record map
def _exec_query(self, sql):
self._prepare_conn()
with self.conn.cursor() as cursor:
cursor.execute(sql)
- return cursor.fetchall()
+ fields = [field_md[0] for field_md in cursor.description
+ ] if cursor.description else []
+ return [dict(zip(fields, row)) for row in cursor.fetchall()]
def _prepare_conn(self):
if self.conn:
return
- if self.query_port <= 0:
- raise Exception("Not set query_port")
self._reset_conn()
def _reset_conn(self):
self.conn = pymysql.connect(user="root",
- host="127.0.0.1",
+ host=self.master_fe_ip,
read_timeout=10,
- port=self.query_port)
+ connect_timeout=3,
+ port=CLUSTER.FE_QUERY_PORT)
def get_db_mgr(cluster_name, required_load_succ=True):
assert cluster_name
db_mgr = DBManager()
- containers = utils.get_doris_containers(cluster_name).get(
- cluster_name, None)
- if not containers:
+ master_fe_ip_file = os.path.join(CLUSTER.get_status_path(cluster_name),
+ "master_fe_ip")
+ master_fe_ip = None
+ if os.path.exists(master_fe_ip_file):
+ with open(master_fe_ip_file, "r") as f:
+ master_fe_ip = f.read().strip()
+
+ if not master_fe_ip:
return db_mgr
- alive_fe_ports = {}
+
+ has_alive_fe = False
+ containers = utils.get_doris_containers(cluster_name).get(cluster_name, [])
for container in containers:
if utils.is_container_running(container):
- _, node_type, id = utils.parse_service_name(container.name)
+ _, node_type, _ = utils.parse_service_name(container.name)
if node_type == CLUSTER.Node.TYPE_FE:
- query_port = utils.get_map_ports(container).get(
- CLUSTER.FE_QUERY_PORT, None)
- if query_port:
- alive_fe_ports[id] = query_port
- if not alive_fe_ports:
+ has_alive_fe = True
+ break
+
+ if not has_alive_fe:
return db_mgr
- master_fe_ip_file = os.path.join(CLUSTER.get_status_path(cluster_name),
- "master_fe_ip")
- query_port = None
- if os.path.exists(master_fe_ip_file):
- with open(master_fe_ip_file, "r") as f:
- master_fe_ip = f.read()
- if master_fe_ip:
- master_id = CLUSTER.Node.get_id_from_ip(master_fe_ip)
- query_port = alive_fe_ports.get(master_id, None)
- if not query_port:
- # A new cluster's master is fe-1
- if 1 in alive_fe_ports:
- query_port = alive_fe_ports[1]
- else:
- query_port = list(alive_fe_ports.values())[0]
-
- db_mgr.set_query_port(query_port)
+ db_mgr.master_fe_ip = master_fe_ip
try:
- db_mgr.load_states(alive_fe_ports)
+ db_mgr.load_states()
except Exception as e:
if required_load_succ:
raise e
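The database.py simplification hinges on _exec_query returning rows as column-name dicts instead of positional tuples, so callers no longer unpack 19-column records by position. A minimal pymysql sketch of that pattern (the connection parameters are illustrative and require a running FE):

```python
import pymysql

def exec_query(conn, sql):
    # map each row to {column_name: value} using cursor.description,
    # so callers can write record["Host"] instead of positional unpacking
    with conn.cursor() as cursor:
        cursor.execute(sql)
        fields = [md[0] for md in cursor.description] if cursor.description else []
        return [dict(zip(fields, row)) for row in cursor.fetchall()]

# illustrative usage; host and port depend on the local cluster
conn = pymysql.connect(user="root", host="127.0.0.1", port=9030,
                       connect_timeout=3, read_timeout=10)
for fe in exec_query(conn, "show frontends"):
    print(fe["Name"], fe["Host"], fe["IsMaster"], fe["Alive"])
```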
diff --git a/docker/runtime/doris-compose/doris-compose.py b/docker/runtime/doris-compose/doris-compose.py
index 0091b70eae9..a2d3a517553 100644
--- a/docker/runtime/doris-compose/doris-compose.py
+++ b/docker/runtime/doris-compose/doris-compose.py
@@ -45,6 +45,9 @@ def run(args, disable_log, help):
if __name__ == '__main__':
args, help = parse_args()
+ verbose = getattr(args, "verbose", False)
+ if verbose:
+ utils.set_log_verbose()
disable_log = getattr(args, "output_json", False)
if disable_log:
utils.set_enable_log(False)
@@ -53,13 +56,13 @@ if __name__ == '__main__':
try:
data = run(args, disable_log, help)
if disable_log:
- print(utils.pretty_json({"code": 0, "data": data}))
+ print(utils.pretty_json({"code": 0, "data": data}), flush=True)
code = 0
except:
err = traceback.format_exc()
if disable_log:
- print(utils.pretty_json({"code": 1, "err": err}))
+ print(utils.pretty_json({"code": 1, "err": err}), flush=True)
else:
- print(err)
+ print(err, flush=True)
code = 1
sys.exit(code)
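The flush=True additions matter because doris-compose output is often consumed by another process (the regression framework parses the --output-json result); without an explicit flush, buffered stdout can arrive late or be lost on abnormal exit. A minimal sketch of the pattern (payload contents are illustrative):

```python
import json

def emit(payload):
    # flush immediately so a parent process piping our stdout
    # sees the JSON as soon as the command finishes
    print(json.dumps(payload, indent=2), flush=True)

emit({"code": 0, "data": {"cluster": "my-cluster"}})
```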
diff --git a/docker/runtime/doris-compose/requirements.txt b/docker/runtime/doris-compose/format-code.sh
similarity index 89%
copy from docker/runtime/doris-compose/requirements.txt
copy to docker/runtime/doris-compose/format-code.sh
index 05258de2df6..0626662e641 100644
--- a/docker/runtime/doris-compose/requirements.txt
+++ b/docker/runtime/doris-compose/format-code.sh
@@ -15,11 +15,5 @@
# specific language governing permissions and limitations
# under the License.
-docker
-docker-compose
-filelock
-jsonpickle
-prettytable
-pymysql
-python-dateutil
-requests<=2.31.0
+yapf -i *.py
+shfmt -w resource/*.sh
diff --git a/docker/runtime/doris-compose/requirements.txt b/docker/runtime/doris-compose/requirements.txt
index 05258de2df6..2f962ed68d8 100644
--- a/docker/runtime/doris-compose/requirements.txt
+++ b/docker/runtime/doris-compose/requirements.txt
@@ -22,4 +22,5 @@ jsonpickle
prettytable
pymysql
python-dateutil
+#pyyaml==5.4.1
requests<=2.31.0
diff --git a/docker/runtime/doris-compose/resource/common.sh b/docker/runtime/doris-compose/resource/common.sh
index a1c1b3ff2a5..40833d01dc6 100644
--- a/docker/runtime/doris-compose/resource/common.sh
+++ b/docker/runtime/doris-compose/resource/common.sh
@@ -120,10 +120,11 @@ wait_pid() {
health_log ""
health_log "ps -elf\n$(ps -elf)\n"
if [ -z $pid ]; then
- health_log "pid not exist"
+ health_log "pid $pid not exist"
exit 1
fi
+ health_log "pid $pid exist"
health_log "wait process $pid"
while true; do
ps -p $pid >/dev/null
@@ -132,5 +133,13 @@ wait_pid() {
fi
sleep 1s
done
+
+ health_log "show dmesg -T: "
+ dmesg -T | tail -n 50 | tee -a $LOG_FILE
+
+ health_log "show ps -elf"
+ health_log "ps -elf\n$(ps -elf)\n"
+ health_log "pid $pid not exist"
+
health_log "wait end"
}
diff --git a/docker/runtime/doris-compose/resource/init_fe.sh b/docker/runtime/doris-compose/resource/init_fe.sh
index d4aad29b0e5..39d3ed3fa93 100755
--- a/docker/runtime/doris-compose/resource/init_fe.sh
+++ b/docker/runtime/doris-compose/resource/init_fe.sh
@@ -45,8 +45,8 @@ fe_daemon() {
sleep 1
        output=$(mysql -P $FE_QUERY_PORT -h $MY_IP -u root --execute "SHOW FRONTENDS;")
code=$?
- health_log "$output"
if [ $code -ne 0 ]; then
+            health_log "exec show frontends failed: $output"
continue
fi
header=$(grep IsMaster <<<$output)
diff --git a/docker/runtime/doris-compose/utils.py b/docker/runtime/doris-compose/utils.py
index 01dfb64fe42..735947e86bd 100644
--- a/docker/runtime/doris-compose/utils.py
+++ b/docker/runtime/doris-compose/utils.py
@@ -56,6 +56,10 @@ def is_enable_log():
return ENABLE_LOG
+def set_log_verbose():
+ get_logger().setLevel(logging.DEBUG)
+
+
def get_logger(name=None):
global LOG
if LOG != None:
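set_log_verbose simply drops the shared logger to DEBUG; combined with the -v flag above, it gives per-run debug logging. A tiny sketch of the same pattern (logger name is illustrative):

```python
import logging

LOG = logging.getLogger("doris-compose")
logging.basicConfig(level=logging.INFO)

def set_log_verbose():
    # called when -v/--verbose is passed; LOG.debug lines become visible
    LOG.setLevel(logging.DEBUG)

set_log_verbose()
LOG.debug("now visible")
```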
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy
index 028bcc71877..5e79ccef21d 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/Config.groovy
@@ -45,6 +45,7 @@ class Config {
public String jdbcUrl
public String jdbcUser
public String jdbcPassword
+
public String defaultDb
public String ccrDownstreamUrl
@@ -70,7 +71,7 @@ class Config {
public String metaServiceHttpAddress
public String recycleServiceHttpAddress
- public RunMode isCloudMode = RunMode.UNKNOWN
+ public RunMode runMode = RunMode.UNKNOWN
public String suitePath
public String dataPath
@@ -300,6 +301,20 @@ class Config {
        config.dorisComposePath = FileUtils.getCanonicalPath(config.dorisComposePath)
config.image = cmd.getOptionValue(imageOpt, config.image)
config.dockerEndNoKill = cmd.hasOption(noKillDockerOpt)
+ if (cmd.hasOption(runModeOpt)) {
+ String runMode = cmd.getOptionValue(runModeOpt, "unknown")
+ if (runMode.equalsIgnoreCase("unknown")) {
+ config.runMode = RunMode.UNKNOWN;
+ } else if (runMode.equalsIgnoreCase("cloud")) {
+ config.runMode = RunMode.CLOUD;
+ } else if (runMode.equalsIgnoreCase("not_cloud")) {
+ config.runMode = RunMode.NOT_CLOUD;
+ } else {
+                throw new IllegalStateException("Bad runMode: ${runMode}, should be one of unknown/cloud/not_cloud, "
+                        + "if unknown, it will be fetched from the FE")
+ }
+ }
+ log.info("runMode: ${config.runMode}")
config.suiteWildcard = cmd.getOptionValue(suiteOpt, config.testSuites)
.split(",")
.collect({s -> s.trim()})
@@ -500,8 +515,8 @@ class Config {
Properties props = cmd.getOptionProperties("conf")
config.otherConfigs.putAll(props)
- config.tryCreateDbIfNotExist()
- config.buildUrlWithDefaultDb()
+        // mainly the auth_xxx cases use defaultDb; these suites had better not rely on it
+ config.createDefaultDb()
return config
}
@@ -922,7 +937,25 @@ class Config {
return null
}
- void tryCreateDbIfNotExist(String dbName = defaultDb) {
+ void createDefaultDb() {
+ String dbName = null
+ try {
+ tryCreateDbIfNotExist(defaultDb)
+ dbName = defaultDb
+ } catch (Exception e) {
+            // defaultDb is not needed in most cases.
+            // When running docker suites without an external fe/be, createDefaultDb will fail, but this exception can be ignored.
+            // In fact, mainly the auth_xxx cases use defaultDb, and they just use jdbcUrl in the connect function.
+            // They could avoid using defaultDb too, but modifying all these cases takes a lot of work.
+            // We had better remove all usage of defaultDb in suites later; every suite should use its own db, not the defaultDb.
+ log.warn("create default db failed ${defaultDb}".toString())
+ }
+
+ jdbcUrl = buildUrlWithDb(jdbcUrl, dbName)
+ log.info("Reset jdbcUrl to ${jdbcUrl}".toString())
+ }
+
+ void tryCreateDbIfNotExist(String dbName) {
// connect without specify default db
try {
String sql = "CREATE DATABASE IF NOT EXISTS ${dbName}"
@@ -952,17 +985,20 @@ class Config {
}
}
- boolean fetchRunMode() {
- if (isCloudMode == RunMode.UNKNOWN) {
+ boolean isCloudMode() {
+ fetchCloudMode()
+ return runMode == RunMode.CLOUD
+ }
+
+ void fetchCloudMode() {
+ if (runMode == RunMode.UNKNOWN) {
try {
def result = JdbcUtils.executeToMapArray(getRootConnection(),
"SHOW FRONTEND CONFIG LIKE 'cloud_unique_id'")
-                isCloudMode = result[0].Value.toString().isEmpty() ? RunMode.NOT_CLOUD : RunMode.CLOUD
+                runMode = result[0].Value.toString().isEmpty() ? RunMode.NOT_CLOUD : RunMode.CLOUD
} catch (Throwable t) {
                throw new IllegalStateException("Fetch server config 'cloud_unique_id' failed, jdbcUrl: ${jdbcUrl}", t)
}
}
- return isCloudMode == RunMode.CLOUD
-
}
Connection getConnection() {
@@ -974,12 +1010,16 @@ class Config {
}
Connection getConnectionByDbName(String dbName) {
- String dbUrl = buildUrlWithDb(jdbcUrl, dbName)
+ String dbUrl = getConnectionUrlByDbName(dbName)
tryCreateDbIfNotExist(dbName)
log.info("connect to ${dbUrl}".toString())
return DriverManager.getConnection(dbUrl, jdbcUser, jdbcPassword)
}
+ String getConnectionUrlByDbName(String dbName) {
+ return buildUrlWithDb(jdbcUrl, dbName)
+ }
+
Connection getConnectionByArrowFlightSql(String dbName) {
Class.forName("org.apache.arrow.driver.jdbc.ArrowFlightJdbcDriver")
String arrowFlightSqlHost = otherConfigs.get("extArrowFlightSqlHost")
@@ -1056,12 +1096,11 @@ class Config {
}
}
- public void buildUrlWithDefaultDb() {
- this.jdbcUrl = buildUrlWithDb(jdbcUrl, defaultDb)
- log.info("Reset jdbcUrl to ${jdbcUrl}".toString())
- }
-
public static String buildUrlWithDbImpl(String jdbcUrl, String dbName) {
+ if (!dbName?.trim()) {
+ return jdbcUrl
+ }
+
String urlWithDb = jdbcUrl
String urlWithoutSchema = jdbcUrl.substring(jdbcUrl.indexOf("://") + 3)
if (urlWithoutSchema.indexOf("/") >= 0) {
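The new null-check in buildUrlWithDbImpl is what lets createDefaultDb fall back to the bare URL when creating defaultDb fails. A rough Python rendering of the intended behavior (the real code is Groovy and also handles replacing an existing db segment, which is omitted here):

```python
def build_url_with_db(jdbc_url, db_name):
    # null/blank db name: keep the URL unchanged (the new early return)
    if not db_name or not db_name.strip():
        return jdbc_url
    # otherwise insert the db before any query parameters, e.g.
    # jdbc:mysql://127.0.0.1:9030?useSSL=false
    #   -> jdbc:mysql://127.0.0.1:9030/regression_test?useSSL=false
    base, sep, params = jdbc_url.partition("?")
    return base.rstrip("/") + "/" + db_name + (sep + params if sep else "")

assert build_url_with_db("jdbc:mysql://127.0.0.1:9030", None) == \
       "jdbc:mysql://127.0.0.1:9030"
```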
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy
index 5b220949168..a648eb40a3e 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/ConfigOptions.groovy
@@ -56,6 +56,7 @@ class ConfigOptions {
static Option sslCertificateOpt
static Option imageOpt
static Option noKillDockerOpt
+ static Option runModeOpt
static Option suiteOpt
static Option excludeSuiteOpt
static Option groupsOpt
@@ -218,6 +219,14 @@ class ConfigOptions {
.desc("don't kill docker containers")
.build()
+ runModeOpt = Option.builder("runMode")
+ .required(false)
+ .hasArg(true)
+ .type(String.class)
+ .longOpt("runMode")
+            .desc("specify the run mode: unknown/cloud/not_cloud; if unknown, it will be fetched from the FE.")
+ .build()
+
suiteOpt = Option.builder("s")
.argName("suiteName")
.required(false)
@@ -597,6 +606,7 @@ class ConfigOptions {
.addOption(sslCertificateOpt)
.addOption(imageOpt)
.addOption(noKillDockerOpt)
+ .addOption(runModeOpt)
.addOption(confOpt)
.addOption(suiteOpt)
.addOption(excludeSuiteOpt)
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/action/ProfileAction.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/action/ProfileAction.groovy
index b019e6c24aa..5f6c00be943 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/action/ProfileAction.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/action/ProfileAction.groovy
@@ -70,7 +70,7 @@ class ProfileAction implements SuiteAction {
httpCli.op("get")
httpCli.printResponse(false)
- if (context.config.fetchRunMode()) {
+ if (context.config.isCloudMode()) {
                        httpCli.basicAuthorization(context.config.feCloudHttpUser, context.config.feCloudHttpPassword)
} else {
                        httpCli.basicAuthorization(context.config.feHttpUser, context.config.feHttpPassword)
@@ -92,7 +92,7 @@ class ProfileAction implements SuiteAction {
profileCli.op("get")
profileCli.printResponse(false)
- if (context.config.fetchRunMode()) {
+ if (context.config.isCloudMode()) {
                        profileCli.basicAuthorization(context.config.feCloudHttpUser, context.config.feCloudHttpPassword)
} else {
                        profileCli.basicAuthorization(context.config.feHttpUser, context.config.feHttpPassword)
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
index f5816ab762d..fb0743eceed 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy
@@ -279,6 +279,7 @@ class Suite implements GroovyInterceptable {
)
}
+    // for more explanation, see the example file: demo_p0/docker_action.groovy
    public void docker(ClusterOptions options = new ClusterOptions(), Closure actionSupplier) throws Exception {
if (context.config.excludeDockerTest) {
return
@@ -289,15 +290,25 @@ class Suite implements GroovyInterceptable {
+ "see example demo_p0/docker_action.groovy")
}
- boolean pipelineIsCloud = isCloudMode()
+ try {
+ context.config.fetchCloudMode()
+ } catch (Exception e) {
+ }
+
boolean dockerIsCloud = false
if (options.cloudMode == null) {
- dockerIsCloud = pipelineIsCloud
+ if (context.config.runMode == RunMode.UNKNOWN) {
+                throw new Exception("Bad run mode, cloud or not_cloud is unknown")
+ }
+ dockerIsCloud = context.config.runMode == RunMode.CLOUD
} else {
- dockerIsCloud = options.cloudMode
-            if (dockerIsCloud != pipelineIsCloud && options.skipRunWhenPipelineDiff) {
+            if (options.cloudMode == true && context.config.runMode == RunMode.NOT_CLOUD) {
return
}
+            if (options.cloudMode == false && context.config.runMode == RunMode.CLOUD) {
+ return
+ }
+ dockerIsCloud = options.cloudMode
}
try {
@@ -558,6 +569,14 @@ class Suite implements GroovyInterceptable {
}
}
+ String getCurDbName() {
+ return context.dbName
+ }
+
+ String getCurDbConnectUrl() {
+ return context.config.getConnectionUrlByDbName(getCurDbName())
+ }
+
long getDbId() {
def dbInfo = sql "show proc '/dbs'"
for(List<Object> row : dbInfo) {
@@ -1459,7 +1478,7 @@ class Suite implements GroovyInterceptable {
}
boolean isCloudMode() {
- return context.config.fetchRunMode()
+ return context.config.isCloudMode()
}
boolean enableStoragevault() {
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy
index 862f437840e..33dfac54d8b 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/SuiteCluster.groovy
@@ -66,10 +66,6 @@ class ClusterOptions {
// default use 1
Boolean useFollowersMode = false
-    // when cloudMode = true/false, but the running pipeline is diff with cloudMode,
- // skip run this docker test or not.
- boolean skipRunWhenPipelineDiff = true
-
// each be disks, a disks format is: disk_type=disk_num[,disk_capacity]
// here disk_type=HDD or SSD, disk capacity is in gb unit.
// for example: beDisks = ["HDD=1", "SSD=2,10", "SSD=10,3"] means:
diff --git a/regression-test/suites/demo_p0/docker_action.groovy b/regression-test/suites/demo_p0/docker_action.groovy
index bfe9c0039e2..d13c5d13e54 100644
--- a/regression-test/suites/demo_p0/docker_action.groovy
+++ b/regression-test/suites/demo_p0/docker_action.groovy
@@ -17,13 +17,30 @@
import org.apache.doris.regression.suite.ClusterOptions
-// run docker suite steps:
+// Run docker suite steps:
// 1. Read 'docker/runtime/doris-compose/Readme.md', make sure you can setup a doris docker cluster;
// 2. update regression-conf-custom.groovy with config:
// image = "xxxx" // your doris docker image
-// excludeDockerTest = false // do run docker suite, default is false
+// excludeDockerTest = false // do run docker suite, default is true
// dockerEndDeleteFiles = false  // after running a docker suite, whether to delete the containers' logs and data in directory '/tmp/doris/<suite-name>'
+// Running a docker suite does not need an external doris cluster.
+// But whether a docker suite actually runs needs more checks.
+// First, get the pipeline's run mode (cloud or not_cloud):
+//    If there is an external doris cluster, fetch the pipeline's runMode from it.
+//    If there is no external doris cluster, set the pipeline's runMode with command args,
+//    for example: sh run-regression-test.sh --run docker_action -runMode=cloud/not_cloud
+// Second, compare ClusterOptions.cloudMode with the pipeline's runMode:
+//    If ClusterOptions.cloudMode = null, let ClusterOptions.cloudMode = the pipeline's cloudMode, and run the docker suite.
+//    If ClusterOptions.cloudMode = true or false, run the docker suite when cloudMode == the pipeline's cloudMode or the pipeline's cloudMode is unknown; otherwise don't run it.
+
+// NOTICE:
+// 1. There is no need to write ` if (isCloudMode()) { return } ` in docker suites;
+//    using `ClusterOptions.cloudMode = true/false` is enough.
+//    When a docker suite runs without an external doris cluster, calling `isCloudMode()` requires an explicit -runMode=cloud/not_cloud.
+//    On the contrary, `ClusterOptions.cloudMode = true/false` does not need -runMode=cloud/not_cloud when no external doris cluster exists.
+
// need to add 'docker' to the suite's group, and don't add 'nonConcurrent' to it
suite('docker_action', 'docker') {
// run a new docker
@@ -48,7 +65,7 @@ suite('docker_action', 'docker') {
def options = new ClusterOptions()
// add fe config items
- options.feConfigs = ['example_conf_k1=v1', 'example_conf_k2=v2']
+ options.feConfigs += ['example_conf_k1=v1', 'example_conf_k2=v2']
// contains 5 backends
options.beNum = 5
// each backend has 1 HDD disk and 3 SSD disks
@@ -63,8 +80,6 @@ suite('docker_action', 'docker') {
options2.beNum = 1
// create cloud cluster
options2.cloudMode = true
-    //// cloud docker only run in cloud pipeline, but enable it run in none-cloud pipeline
- // options2.skipRunWhenPipelineDiff = false
// run another docker, create a cloud cluster
docker(options2) {
        // cloud cluster will ignore replication_num and always set it to 1, so create table succeeds even with only 1 be.
diff --git a/regression-test/suites/schema_change_p0/test_abort_txn_by_be_local5.groovy b/regression-test/suites/schema_change_p0/test_abort_txn_by_be_local5.groovy
index 0df8254ff25..3835da4ccb2 100644
--- a/regression-test/suites/schema_change_p0/test_abort_txn_by_be_local5.groovy
+++ b/regression-test/suites/schema_change_p0/test_abort_txn_by_be_local5.groovy
@@ -21,7 +21,6 @@ import org.apache.http.NoHttpResponseException
suite('test_abort_txn_by_be_local5', 'docker') {
def options = new ClusterOptions()
options.cloudMode = false
- options.skipRunWhenPipelineDiff = false
options.enableDebugPoints()
options.beConfigs += [ "enable_java_support=false" ]
options.feConfigs += [ "enable_abort_txn_by_checking_coordinator_be=true" ]
diff --git a/regression-test/suites/schema_change_p0/test_abort_txn_by_be_local6.groovy b/regression-test/suites/schema_change_p0/test_abort_txn_by_be_local6.groovy
index a95d335579b..ff53c412590 100644
--- a/regression-test/suites/schema_change_p0/test_abort_txn_by_be_local6.groovy
+++ b/regression-test/suites/schema_change_p0/test_abort_txn_by_be_local6.groovy
@@ -21,7 +21,6 @@ import org.apache.http.NoHttpResponseException
suite('test_abort_txn_by_be_local6', 'docker') {
def options = new ClusterOptions()
options.cloudMode = false
- options.skipRunWhenPipelineDiff = true
options.enableDebugPoints()
options.beConfigs += [ "enable_java_support=false" ]
    options.feConfigs += [ "enable_abort_txn_by_checking_coordinator_be=false" ]
diff --git a/regression-test/suites/schema_change_p0/test_abort_txn_by_fe_local3.groovy b/regression-test/suites/schema_change_p0/test_abort_txn_by_fe_local3.groovy
index 355dab05879..32cd9d0eba7 100644
--- a/regression-test/suites/schema_change_p0/test_abort_txn_by_fe_local3.groovy
+++ b/regression-test/suites/schema_change_p0/test_abort_txn_by_fe_local3.groovy
@@ -21,7 +21,6 @@ import org.apache.http.NoHttpResponseException
suite('test_abort_txn_by_fe_local3', 'docker') {
def options = new ClusterOptions()
options.cloudMode = false
- options.skipRunWhenPipelineDiff = false
options.enableDebugPoints()
options.beConfigs += [ "enable_java_support=false" ]
    options.feConfigs += [ "enable_abort_txn_by_checking_coordinator_be=false" ]
diff --git a/run-regression-test.sh b/run-regression-test.sh
index 6357f4111a7..16256152887 100755
--- a/run-regression-test.sh
+++ b/run-regression-test.sh
@@ -46,6 +46,8 @@ Usage: $0 <shell_options> <framework_options>
-dockerSuiteParallel run docker tests using specified threads
-randomOrder run tests in a random order
    -noKillDocker                      don't kill containers when docker suites finish
+    -runMode                           when running docker suites, no external doris cluster is needed;
+                                       the user may specify the run mode: cloud or not_cloud.
    -times                             run tests {times} times
Eg.
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]