This is an automated email from the ASF dual-hosted git repository.
dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 635463ab1f2 [test](cloud)add plugin for test run cloud mode cluster
cases (#31960)
635463ab1f2 is described below
commit 635463ab1f25101be028d797646cb800aaf5471f
Author: chunping <[email protected]>
AuthorDate: Mon Mar 11 16:39:45 2024 +0800
[test](cloud)add plugin for test run cloud mode cluster cases (#31960)
---
.../plugins/cloud_abnormal_plugin.groovy | 371 ++++++++++++++++++
.../plugins/cloud_cluster_op_plugin.groovy | 86 +++++
.../plugins/cloud_compaction_plugin.groovy | 112 ++++++
.../plugins/cloud_recycler_plugin.groovy | 195 ++++++++++
.../plugins/cloud_resource_plugin.groovy | 76 ++++
regression-test/plugins/cloud_smoke_plugin.groovy | 48 +++
.../plugins/cloud_smooth_upgrade.groovy | 65 ++++
.../plugins/cloud_stage_ram_plugin.groovy | 424 +++++++++++++++++++++
8 files changed, 1377 insertions(+)
diff --git a/regression-test/plugins/cloud_abnormal_plugin.groovy
b/regression-test/plugins/cloud_abnormal_plugin.groovy
new file mode 100644
index 00000000000..dbfb3e07e8d
--- /dev/null
+++ b/regression-test/plugins/cloud_abnormal_plugin.groovy
@@ -0,0 +1,371 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import org.apache.doris.regression.suite.Suite
+
+import com.google.common.base.Strings;
+import net.schmizz.sshj.SSHClient;
+import net.schmizz.sshj.common.IOUtils;
+import net.schmizz.sshj.connection.channel.direct.Session;
+import net.schmizz.sshj.connection.channel.direct.Session.Command;
+
+import groovy.json.JsonSlurper;
+import java.util.concurrent.TimeUnit;
+
// Validate that processName names a supported Doris process kind.
// Accepts exactly "fe", "be" or "ms"; anything else is rejected.
void checkProcessName(String processName) throws Exception {
    def allowed = ["fe", "be", "ms"] as Set
    if (!allowed.contains(processName)) {
        throw new Exception("Invalid process name: " + processName)
    }
}
+
// Parse a cluster-topology JSON file into a map structure.
// Throws if the path is null/empty or the file cannot be parsed; always
// logs the path and (possibly null) parse result for debugging.
Suite.metaClass.loadClusterMap = { String clusterFile /* param */ ->
    def clusterMap = null
    try {
        if (Strings.isNullOrEmpty(clusterFile)) {
            throw new Exception("Empty cluster file")
        }
        // Parse from a File so JsonSlurper opens and closes the underlying
        // reader itself — the previous FileReader was never closed (leak).
        clusterMap = new JsonSlurper().parse(new File(clusterFile))
        return clusterMap
    } finally {
        logger.debug("clusterFile:{}, clusterMap:{}", clusterFile, clusterMap);
    }
}
+
// Run a shell command on a remote node over SSH and assert it exits 0.
// Authenticates with the current local user's public key.
// NOTE(review): assumes nodeIp is already in the local known_hosts file and
// the local user's key is authorized on the target — confirm in CI setup.
Suite.metaClass.executeCommand = { String nodeIp, String commandStr /* param */ ->
    Suite suite = delegate as Suite

    final SSHClient ssh = new SSHClient()
    ssh.loadKnownHosts()
    ssh.connect(nodeIp)
    Session session = null
    try {
        logger.debug("user.name:{}", System.getProperty("user.name"))
        // key-based auth as the same user running the regression suite
        ssh.authPublickey(System.getProperty("user.name"))
        session = ssh.startSession()
        final Command cmd = session.exec(commandStr)
        // give the remote command at most 30 seconds to complete
        cmd.join(30, TimeUnit.SECONDS)
        def code = cmd.getExitStatus()
        def out = IOUtils.readFully(cmd.getInputStream()).toString()
        def err = IOUtils.readFully(cmd.getErrorStream()).toString()
        def errMsg = cmd.getExitErrorMessage()
        logger.debug("commandStr:${commandStr}")
        logger.debug("code:${code}, out:${out}, err:${err}, errMsg:${errMsg}")
        // non-zero exit fails the calling test
        assertEquals(0, code)
    } finally {
        // close the session first; always disconnect the client afterwards
        try {
            if (session != null) {
                session.close()
            }
        } catch (IOException e) {
            logger.warn(e);
        }
        ssh.disconnect()
    }
    return
}
+
// Stop a Doris process ("fe", "be" or "ms") on a remote node via SSH.
// Meta-service ships a single stop.sh; fe/be have per-process stop scripts.
Suite.metaClass.stopProcess = { String nodeIp, String processName, String installPath /* param */ ->
    Suite suite = delegate as Suite
    checkProcessName(processName)

    logger.debug("stopProcess(): nodeIp=${nodeIp} installPath=${installPath} processName=${processName}")
    def script = processName.trim().equalsIgnoreCase("ms")
            ? "${installPath}/bin/stop.sh"
            : "${installPath}/bin/stop_${processName}.sh"
    executeCommand(nodeIp, "bash -c \"${script}\"")
    return
}
+
// Start a Doris process ("fe", "be" or "ms") on a remote node via SSH.
// Meta-service uses a single start.sh with an explicit role flag; fe/be
// have dedicated per-process start scripts. All run daemonized.
Suite.metaClass.startProcess = { String nodeIp, String processName, String installPath /* param */ ->
    Suite suite = delegate as Suite
    checkProcessName(processName);

    logger.debug("startProcess(): nodeIp=${nodeIp} installPath=${installPath} processName=${processName}");

    def launch = processName.trim().equalsIgnoreCase("ms")
            ? "${installPath}/bin/start.sh --meta-service --daemon"
            : "${installPath}/bin/start_${processName}.sh --daemon"
    executeCommand(nodeIp, "bash -c \"${launch}\"")
    return;
}
+
// Assert (via executeCommand's zero-exit check) that the given process is
// running on the remote node, identified by a process-specific ps pattern.
Suite.metaClass.checkProcessAlive = { String nodeIp, String processName, String installPath /* param */ ->
    Suite suite = delegate as Suite
    logger.debug("checkProcessAlive(): nodeIp=${nodeIp} installPath=${installPath} processName=${processName}")
    checkProcessName(processName)

    String pattern = null;
    switch (processName.trim().toLowerCase()) {
        case "fe":
            // the FE JVM is launched with its gc log under the install path
            pattern = "${installPath}/log/fe.gc.log"
            break
        case "be":
            // the BE binary lives under lib/
            pattern = "${installPath}/lib/doris_be"
            break
        case "ms":
            pattern = "'${installPath}/lib/selectdb_cloud --meta-service'"
            break
    }

    executeCommand(nodeIp, "bash -c \"ps aux | grep ${pattern} | grep -v grep\"")
    return
}
+
// Stop then start the process, then poll up to 3 times (5s apart) until it
// is seen alive; the last liveness failure is rethrown when retries run out.
Suite.metaClass.restartProcess = { String nodeIp, String processName, String installPath /* param */ ->
    Suite suite = delegate as Suite
    logger.debug("restartProcess(): nodeIp=${nodeIp} installPath=${installPath} processName=${processName}")
    checkProcessName(processName)

    stopProcess(nodeIp, processName, installPath)
    sleep(1000)
    startProcess(nodeIp, processName, installPath)

    // three liveness attempts: remaining counts down 2, 1, 0
    for (int remaining = 2; ; remaining--) {
        try {
            checkProcessAlive(nodeIp, processName, installPath)
            break
        } catch (Exception e) {
            logger.info("checkProcessAlive failed, tryTimes=${remaining}")
            if (remaining <= 0) {
                throw e
            }
            sleep(5000)
        }
    }
}
+
// Poll `show load` until the job with this label reaches LOADING.
// PENDING is re-checked every second (up to ~600 times, then timeout);
// CANCELLED and FINISHED are both failures here — the caller expects to
// observe the job while it is still loading. Other states re-check at once.
Suite.metaClass.checkBrokerLoadLoading = { String label /* param */ ->
    int tryTimes = 600
    while (tryTimes-- > 0) {
        def stateResult = sql "show load where Label = '${label}'"
        def loadState = stateResult[stateResult.size() - 1][2].toString()
        if ("pending".equalsIgnoreCase(loadState)) {
            if (tryTimes <= 1) {
                throw new IllegalStateException("check load ${label} timeout")
            }
            sleep(1000)
            continue
        }

        logger.info("stateResult:{}", stateResult)
        switch (loadState.toLowerCase()) {
            case "loading":
                return
            case "cancelled":
                throw new IllegalStateException("load ${label} has been cancelled")
            case "finished":
                throw new IllegalStateException("load ${label} has been finished")
        }
        // any other state: loop and query again immediately
    }
}
+
// Poll `show load` (60s interval, up to 20 checks) until the job with this
// label is FINISHED. Throws if it was CANCELLED or never finishes in time.
Suite.metaClass.checkBrokerLoadFinished = { String label /* param */ ->
    int tryTimes = 20
    while (tryTimes-- > 0) {
        def rows = sql "show load where Label = '${label}'"
        def state = rows[rows.size() - 1][2].toString()
        if ("cancelled".equalsIgnoreCase(state)) {
            logger.info("stateResult:{}", rows)
            throw new IllegalStateException("load ${label} has been cancelled")
        }
        if ("finished".equalsIgnoreCase(state)) {
            logger.info("stateResult:{}", rows)
            return
        }

        if (tryTimes <= 1) {
            throw new IllegalStateException("check load ${label} timeout")
        }
        sleep(60000)
    }
}
+
// Poll `show copy` until the copy job matching this label reaches LOADING.
// PENDING is re-checked every second (up to ~600 times, then timeout);
// CANCELLED and FINISHED both fail — the caller wants to catch the job
// mid-flight. Other states re-check immediately.
Suite.metaClass.checkCopyIntoLoading = { String label /* param */ ->
    int tryTimes = 600
    while (tryTimes-- > 0) {
        def stateResult = sql "show copy where label like '${label}'"
        def loadState = stateResult[stateResult.size() - 1][3].toString()
        if ("pending".equalsIgnoreCase(loadState)) {
            if (tryTimes <= 1) {
                throw new IllegalStateException("check copy into ${label} timeout")
            }
            sleep(1000)
            continue
        }

        logger.info("stateResult:{}", stateResult)
        switch (loadState.toLowerCase()) {
            case "loading":
                return
            case "cancelled":
                throw new IllegalStateException("copy into ${label} has been cancelled")
            case "finished":
                throw new IllegalStateException("copy into ${label} has been finished")
        }
        // any other state: loop and query again immediately
    }
}
+
// Poll `show copy` (60s interval, up to 20 checks) until the copy job
// matching this label is FINISHED. Throws if the job was CANCELLED, or if
// retries are exhausted — previously the loop fell through silently on
// timeout, letting a stuck job look like success (the broker-load sibling
// checkBrokerLoadFinished already threw in this case).
Suite.metaClass.checkCopyIntoFinished = { String label /* param */ ->
    int tryTimes = 20
    while (tryTimes-- > 0) {
        def stateResult = sql "show copy where label like '${label}'"
        def loadState = stateResult[stateResult.size() - 1][3].toString()
        if ('cancelled'.equalsIgnoreCase(loadState)) {
            logger.info("stateResult:{}", stateResult)
            throw new IllegalStateException("copy into ${label} has been cancelled")
        } else if ('finished'.equalsIgnoreCase(loadState)) {
            logger.info("stateResult:{}", stateResult)
            return
        }
        // fail loudly when the job never reaches FINISHED in time
        if (tryTimes <= 1) {
            throw new IllegalStateException("check copy into ${label} timeout")
        }
        sleep(60000)
    }
}
+
// Poll the latest schema-change job on this index until it is RUNNING.
// PENDING/WAITING_TXN are re-checked every second; reaching CANCELLED or
// FINISHED first is a failure — the caller expects to observe the job
// while it is still running.
Suite.metaClass.waitSchemaChangeJobRunning = { String tableName /* param */ ->
    int tryTimes = 600
    while (tryTimes-- > 0) {
        def jobResult = sql """SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
        def jobState = jobResult[0][9].toString()
        if ("pending".equalsIgnoreCase(jobState) || "WAITING_TXN".equalsIgnoreCase(jobState)) {
            sleep(1000)
            continue
        }

        logger.info("jobResult:{}", jobResult)
        switch (jobState.toLowerCase()) {
            case "running":
                return
            case "cancelled":
                throw new IllegalStateException("${tableName}'s job has been cancelled")
            case "finished":
                throw new IllegalStateException("${tableName}'s job has been finished")
        }
        // any other state: loop and query again immediately
    }
}
+
// Poll the latest schema-change job on this index (60s interval, up to 20
// checks) until it is FINISHED. Throws on CANCELLED, and now also on
// timeout — previously the loop returned silently when retries ran out,
// so a still-running job could be mistaken for success.
Suite.metaClass.waitSchemaChangeJobFinished = { String tableName /* param */ ->
    int tryTimes = 20
    while (tryTimes-- > 0) {
        def jobResult = sql """SHOW ALTER TABLE COLUMN WHERE IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
        def jobState = jobResult[0][9].toString()
        if ('cancelled'.equalsIgnoreCase(jobState)) {
            logger.info("jobResult:{}", jobResult)
            throw new IllegalStateException("${tableName}'s job has been cancelled")
        }
        if ('finished'.equalsIgnoreCase(jobState)) {
            logger.info("jobResult:{}", jobResult)
            return
        }
        // fail loudly when the job never reaches FINISHED in time
        if (tryTimes <= 1) {
            throw new IllegalStateException("wait schema change job of ${tableName} timeout")
        }
        sleep(60000)
    }
}
+
// Poll the latest rollup job on this table until it is RUNNING.
// PENDING/WAITING_TXN are re-checked every second; reaching CANCELLED or
// FINISHED first is a failure — the caller expects to catch the job
// while it is still running.
Suite.metaClass.waitRollupJobRunning = { String tableName /* param */ ->
    int tryTimes = 600
    while (tryTimes-- > 0) {
        def jobResult = sql """SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1"""
        def jobState = jobResult[0][8].toString()
        if ("pending".equalsIgnoreCase(jobState) || "WAITING_TXN".equalsIgnoreCase(jobState)) {
            sleep(1000)
            continue
        }

        logger.info("jobResult:{}", jobResult)
        switch (jobState.toLowerCase()) {
            case "running":
                return
            case "cancelled":
                throw new IllegalStateException("${tableName}'s job has been cancelled")
            case "finished":
                throw new IllegalStateException("${tableName}'s job has been finished")
        }
        // any other state: loop and query again immediately
    }
}
+
// Poll the latest rollup job on this table (60s interval, up to 20 checks)
// until it is FINISHED. Throws on CANCELLED, and now also on timeout —
// previously the loop returned silently when retries ran out, so a
// still-running job could be mistaken for success.
Suite.metaClass.waitRollupJobFinished = { String tableName /* param */ ->
    int tryTimes = 20
    while (tryTimes-- > 0) {
        def jobResult = sql """SHOW ALTER TABLE ROLLUP WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1"""
        def jobState = jobResult[0][8].toString()
        if ('cancelled'.equalsIgnoreCase(jobState)) {
            logger.info("jobResult:{}", jobResult)
            throw new IllegalStateException("${tableName}'s job has been cancelled")
        }
        if ('finished'.equalsIgnoreCase(jobState)) {
            logger.info("jobResult:{}", jobResult)
            return
        }
        // fail loudly when the job never reaches FINISHED in time
        if (tryTimes <= 1) {
            throw new IllegalStateException("wait rollup job of ${tableName} timeout")
        }
        sleep(60000)
    }
}
+
// Poll the latest materialized-view job on this table until it is RUNNING.
// PENDING/WAITING_TXN are re-checked every second; reaching CANCELLED or
// FINISHED first is a failure — the caller expects to catch the job
// while it is still running.
Suite.metaClass.waitMvJobRunning = { String tableName /* param */ ->
    int tryTimes = 600
    while (tryTimes-- > 0) {
        def jobResult = sql """SHOW ALTER TABLE MATERIALIZED VIEW WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1 """
        def jobState = jobResult[0][8].toString()
        if ("pending".equalsIgnoreCase(jobState) || "WAITING_TXN".equalsIgnoreCase(jobState)) {
            sleep(1000)
            continue
        }

        logger.info("jobResult:{}", jobResult)
        switch (jobState.toLowerCase()) {
            case "running":
                return
            case "cancelled":
                throw new IllegalStateException("${tableName}'s job has been cancelled")
            case "finished":
                throw new IllegalStateException("${tableName}'s job has been finished")
        }
        // any other state: loop and query again immediately
    }
}
+
// Poll the latest materialized-view job on this table (60s interval, up to
// 20 checks) until it is FINISHED. Throws on CANCELLED, and now also on
// timeout — previously the loop returned silently when retries ran out,
// so a still-running job could be mistaken for success.
Suite.metaClass.waitMvJobFinished = { String tableName /* param */ ->
    int tryTimes = 20
    while (tryTimes-- > 0) {
        def jobResult = sql """SHOW ALTER TABLE MATERIALIZED VIEW WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1 """
        def jobState = jobResult[0][8].toString()
        if ('cancelled'.equalsIgnoreCase(jobState)) {
            logger.info("jobResult:{}", jobResult)
            throw new IllegalStateException("${tableName}'s job has been cancelled")
        }
        if ('finished'.equalsIgnoreCase(jobState)) {
            logger.info("jobResult:{}", jobResult)
            return
        }
        // fail loudly when the job never reaches FINISHED in time
        if (tryTimes <= 1) {
            throw new IllegalStateException("wait mv job of ${tableName} timeout")
        }
        sleep(60000)
    }
}
+
+
+
+
+
diff --git a/regression-test/plugins/cloud_cluster_op_plugin.groovy
b/regression-test/plugins/cloud_cluster_op_plugin.groovy
new file mode 100644
index 00000000000..3f22f3eab92
--- /dev/null
+++ b/regression-test/plugins/cloud_cluster_op_plugin.groovy
@@ -0,0 +1,86 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import groovy.json.JsonOutput
+import org.apache.doris.regression.suite.Suite
+
// Register a compute cluster (with a single BE node) through the
// meta-service add_cluster HTTP API. Treats both OK and ALREADY_EXISTED as
// success, so repeated setup runs are idempotent.
// NOTE(review): `instance_id` and `token` are not parameters of this
// closure — they are resolved dynamically when it runs (presumably bound on
// the suite/script scope); confirm against the callers.
Suite.metaClass.addClusterLb = { be_unique_id, ip, port, cluster_name, cluster_id, public_endpoint, private_endpoint ->
    def jsonOutput = new JsonOutput()
    // request payload: one COMPUTE cluster containing a single BE node
    def s3 = [
        type: "COMPUTE",
        cluster_name : cluster_name,
        cluster_id : cluster_id,
        public_endpoint: public_endpoint,
        private_endpoint: private_endpoint,
        nodes: [
            [
                cloud_unique_id: be_unique_id,
                ip: ip,
                heartbeat_port: port
            ],
        ]
    ]
    def map = [instance_id: "${instance_id}", cluster: s3]
    def js = jsonOutput.toJson(map)
    log.info("add cluster req: ${js} ".toString())

    def add_cluster_api = { request_body, check_func ->
        httpTest {
            endpoint context.config.metaServiceHttpAddress
            uri "/MetaService/http/add_cluster?token=${token}"
            body request_body
            check check_func
        }
    }

    add_cluster_api.call(js) {
        respCode, body ->
            // ALREADY_EXISTED is accepted so reruns don't fail on setup
            log.info("add cluster resp: ${body} ${respCode}".toString())
            def json = parseJson(body)
            assertTrue(json.code.equalsIgnoreCase("OK") || json.code.equalsIgnoreCase("ALREADY_EXISTED"))
    }
}
+
// Update the public/private endpoints of an existing compute cluster via the
// meta-service update_cluster_endpoint HTTP API. Treats OK and
// ALREADY_EXISTED responses as success.
// NOTE(review): `instance_id` and `token` are resolved dynamically when the
// closure runs (expected to be bound on the suite/script scope) — confirm.
Suite.metaClass.updateClusterEndpoint = { cluster_id, public_endpoint, private_endpoint ->
    def jsonOutput = new JsonOutput()
    def clusterSpec = [
        cluster_id : cluster_id,
        public_endpoint: public_endpoint,
        private_endpoint: private_endpoint,
    ]
    def map = [instance_id: "${instance_id}", cluster: clusterSpec]
    def js = jsonOutput.toJson(map)
    // fix: this log line was copy-pasted from addClusterLb ("add cluster req")
    log.info("update cluster endpoint req: ${js} ".toString())

    // fix: local closure renamed from the copy-pasted `add_cluster_api`
    def update_cluster_endpoint_api = { request_body, check_func ->
        httpTest {
            endpoint context.config.metaServiceHttpAddress
            uri "/MetaService/http/update_cluster_endpoint?token=${token}"
            body request_body
            check check_func
        }
    }

    update_cluster_endpoint_api.call(js) {
        respCode, body ->
            log.info("update cluster endpoint: ${body} ${respCode}".toString())
            def json = parseJson(body)
            assertTrue(json.code.equalsIgnoreCase("OK") || json.code.equalsIgnoreCase("ALREADY_EXISTED"))
    }
}
+
+logger.info("Added 'cloud cluster op' function to Suite")
+
diff --git a/regression-test/plugins/cloud_compaction_plugin.groovy
b/regression-test/plugins/cloud_compaction_plugin.groovy
new file mode 100644
index 00000000000..8381fd28a8e
--- /dev/null
+++ b/regression-test/plugins/cloud_compaction_plugin.groovy
@@ -0,0 +1,112 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.apache.doris.regression.suite.Suite
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
// Trigger cumulative compaction for every tablet of `tableName` through one
// BE of the first compute cluster, then poll each tablet's run_status until
// all compactions have finished.
Suite.metaClass.doCloudCompaction = { String tableName /* param */ ->
    // which suite invoke current function?
    Suite suite = delegate as Suite
    suite.getLogger().info("Test plugin: suiteName: ${suite.name}, tableName:${tableName}".toString())

    // `show backends` columns used below: [0]=BackendId, [2]=IP,
    // [5]=HttpPort, [19]=Tag (JSON that carries cloud_cluster_name)
    String[][] backends = sql """ show backends; """
    suite.getLogger().info("backends: ${backends}".toString())

    assertTrue(backends.size() > 0)
    def backendIdToBackendIP = [:]
    def backendIdToBackendHttpPort = [:]
    def clusterToBackendId = [:]

    for (String[] backend in backends) {
        backendIdToBackendIP.put(backend[0], backend[2])
        backendIdToBackendHttpPort.put(backend[0], backend[5])
        def tagJson = parseJson(backend[19])
        // keep one representative backend per compute cluster
        if (!clusterToBackendId.containsKey(tagJson.cloud_cluster_name)) {
            clusterToBackendId.put(tagJson.cloud_cluster_name, backend[0])
        }
    }
    suite.getLogger().info("backendIdToBackendIP: ${backendIdToBackendIP}".toString())
    suite.getLogger().info("backendIdToBackendHttpPort: ${backendIdToBackendHttpPort}".toString())
    suite.getLogger().info("clusterToBackendId: ${clusterToBackendId}".toString())

    def cluster0 = clusterToBackendId.keySet()[0]
    def backend_id0 = clusterToBackendId.get(cluster0)

    // `show tablets` column used below: [0]=TabletId
    String[][] tablets = sql """ show tablets from ${tableName}; """
    def doCompaction = { backend_id, compact_type ->
        // trigger compactions for all tablets in ${tableName}
        for (String[] tablet in tablets) {
            Thread.sleep(10)
            String tablet_id = tablet[0]
            StringBuilder sb = new StringBuilder();
            sb.append("curl -X POST http://")
            sb.append(backendIdToBackendIP.get(backend_id))
            sb.append(":")
            sb.append(backendIdToBackendHttpPort.get(backend_id))
            sb.append("/api/compaction/run?tablet_id=")
            sb.append(tablet_id)
            sb.append("&compact_type=${compact_type}")

            String command = sb.toString()
            // fix: declare locals with `def` so they stay scoped to this
            // closure instead of leaking into the script/suite binding
            def process = command.execute()
            def code = process.waitFor()
            def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
            def out = process.getText()
            logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
            // fix: expected value first, matching the JUnit convention used
            // by the other plugins in this change
            assertEquals(0, code)
            def compactJson = parseJson(out.trim())
            //assertEquals("success", compactJson.status.toLowerCase())
        }

        // wait for all compactions done
        for (String[] tablet in tablets) {
            boolean running = true
            do {
                Thread.sleep(1000)
                String tablet_id = tablet[0]
                StringBuilder sb = new StringBuilder();
                sb.append("curl -X GET http://")
                sb.append(backendIdToBackendIP.get(backend_id))
                sb.append(":")
                sb.append(backendIdToBackendHttpPort.get(backend_id))
                sb.append("/api/compaction/run_status?tablet_id=")
                sb.append(tablet_id)

                String command = sb.toString()
                logger.info(command)
                def process = command.execute()
                def code = process.waitFor()
                def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
                def out = process.getText()
                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
                assertEquals(0, code)
                def compactionStatus = parseJson(out.trim())
                //assertEquals("success", compactionStatus.status.toLowerCase())
                running = compactionStatus.run_status
            } while (running)
        }
    }
    doCompaction.call(backend_id0, "cumulative")
    //doCompaction.call(backend_id0, "base")
    return
}
+logger.info("Added 'doCloudCompaction' function to Suite")
+
+
diff --git a/regression-test/plugins/cloud_recycler_plugin.groovy
b/regression-test/plugins/cloud_recycler_plugin.groovy
new file mode 100644
index 00000000000..2cf3f37ccd8
--- /dev/null
+++ b/regression-test/plugins/cloud_recycler_plugin.groovy
@@ -0,0 +1,195 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import groovy.json.JsonOutput
+
+import org.apache.doris.regression.suite.Suite
+
+import com.amazonaws.auth.AWSStaticCredentialsProvider
+import com.amazonaws.auth.BasicAWSCredentials
+import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
+import com.amazonaws.services.s3.AmazonS3
+import com.amazonaws.services.s3.AmazonS3ClientBuilder
+import com.amazonaws.services.s3.model.ListObjectsRequest
+import com.amazonaws.services.s3.model.ObjectListing
+
// Ask the recycler service to recycle the given instance immediately, via
// the recycle_instance HTTP API, and assert the service answered "OK".
Suite.metaClass.triggerRecycle = { String token, String instanceId /* param */ ->
    // which suite invoke current function?
    Suite suite = delegate as Suite
    // function body
    suite.getLogger().info("Test plugin: suiteName: ${suite.name}, token: ${token}, instance:${instanceId}".toString())

    // request payload: the recycler accepts a list of instance ids
    def triggerRecycleBody = [instance_ids: ["${instanceId}"]]
    def jsonOutput = new JsonOutput()
    def triggerRecycleJson = jsonOutput.toJson(triggerRecycleBody)
    def triggerRecycleResult = null;
    def triggerRecycleApi = { requestBody, checkFunc ->
        httpTest {
            endpoint suite.context.config.recycleServiceHttpAddress
            uri "/RecyclerService/http/recycle_instance?token=$token"
            body requestBody
            check checkFunc
        }
    }

    triggerRecycleApi.call(triggerRecycleJson) {
        respCode, body ->
            log.info("http cli result: ${body} ${respCode}".toString())
            triggerRecycleResult = body
            // the recycler replies with a bare "OK" body on success
            suite.getLogger().info("triggerRecycleResult:${triggerRecycleResult}".toString())
            assertTrue(triggerRecycleResult.trim().equalsIgnoreCase("OK"))
    }
    return;
}
+
+logger.info("Added 'triggerRecycle' function to Suite")
+
+
+//cloud mode recycler plugin
// Return true when every tablet of `tableName` has been recycled, i.e. no
// object remains under <prefix>/data/<tabletId>/ in the instance's bucket.
// Returns false as soon as one tablet still has data.
Suite.metaClass.checkRecycleTable = { String token, String instanceId, String cloudUniqueId, String tableName,
        Collection<String> tabletIdList /* param */ ->
    Suite suite = delegate as Suite
    suite.getLogger().info("""Test plugin: suiteName: ${suite.name}, tableName: ${tableName}, instanceId: ${instanceId}, token:${token}, cloudUniqueId:${cloudUniqueId}""".toString())

    def getObjStoreInfoApiResult = suite.getObjStoreInfo(token, cloudUniqueId);
    suite.getLogger().info("checkRecycleTable(): getObjStoreInfoApiResult:${getObjStoreInfoApiResult}".toString())

    String ak = getObjStoreInfoApiResult.result.obj_info[0].ak
    String sk = getObjStoreInfoApiResult.result.obj_info[0].sk
    String endpoint = getObjStoreInfoApiResult.result.obj_info[0].endpoint
    String region = getObjStoreInfoApiResult.result.obj_info[0].region
    String prefix = getObjStoreInfoApiResult.result.obj_info[0].prefix
    String bucket = getObjStoreInfoApiResult.result.obj_info[0].bucket
    // security fix: never log the secret key (sk) — credentials must not
    // end up in test logs
    suite.getLogger().info("ak:${ak}, endpoint:${endpoint}, prefix:${prefix}".toString())

    def credentials = new BasicAWSCredentials(ak, sk)
    def endpointConfiguration = new EndpointConfiguration(endpoint, region)
    def s3Client = AmazonS3ClientBuilder.standard().withEndpointConfiguration(endpointConfiguration)
            .withCredentials(new AWSStaticCredentialsProvider(credentials)).build()

    assertTrue(tabletIdList.size() > 0)
    for (tabletId : tabletIdList) {
        suite.getLogger().info("tableName: ${tableName}, tabletId:${tabletId}");
        // MaxKeys=1: we only need to know whether ANY object remains
        def objectListing = s3Client.listObjects(
            new ListObjectsRequest().withMaxKeys(1).withBucketName(bucket).withPrefix("${prefix}/data/${tabletId}/"))

        suite.getLogger().info("tableName: ${tableName}, tabletId:${tabletId}, objectListing:${objectListing.getObjectSummaries()}".toString())
        if (!objectListing.getObjectSummaries().isEmpty()) {
            return false;
        }
    }
    return true;
}
+
+logger.info("Added 'checkRecycleTable' function to Suite")
+
// Return true when the internal-stage object for `fileName` under the
// current JDBC user's stage prefix has been recycled (no longer listed).
Suite.metaClass.checkRecycleInternalStage = { String token, String instanceId, String cloudUniqueId, String fileName
        /* param */ ->
    Suite suite = delegate as Suite
    suite.getLogger().info("""Test plugin: suiteName: ${suite.name}, instanceId: ${instanceId}, token:${token}, cloudUniqueId:${cloudUniqueId}""".toString())

    def getObjStoreInfoApiResult = suite.getObjStoreInfo(token, cloudUniqueId);
    // fix: log tag used to say "checkRecycleTable()" (copy-paste)
    suite.getLogger().info("checkRecycleInternalStage(): getObjStoreInfoApiResult:${getObjStoreInfoApiResult}".toString())

    String ak = getObjStoreInfoApiResult.result.obj_info[0].ak
    String sk = getObjStoreInfoApiResult.result.obj_info[0].sk
    String endpoint = getObjStoreInfoApiResult.result.obj_info[0].endpoint
    String region = getObjStoreInfoApiResult.result.obj_info[0].region
    String prefix = getObjStoreInfoApiResult.result.obj_info[0].prefix
    String bucket = getObjStoreInfoApiResult.result.obj_info[0].bucket
    // security fix: never log the secret key (sk)
    suite.getLogger().info("ak:${ak}, endpoint:${endpoint}, prefix:${prefix}".toString())

    def credentials = new BasicAWSCredentials(ak, sk)
    def endpointConfiguration = new EndpointConfiguration(endpoint, region)
    def s3Client = AmazonS3ClientBuilder.standard().withEndpointConfiguration(endpointConfiguration)
            .withCredentials(new AWSStaticCredentialsProvider(credentials)).build()

    // for root and admin, userId equal userName
    String userName = suite.context.config.jdbcUser;
    String userId = suite.context.config.jdbcUser;
    // MaxKeys=1: only need to know whether the object is still there
    def objectListing = s3Client.listObjects(
        new ListObjectsRequest().withMaxKeys(1)
            .withBucketName(bucket)
            .withPrefix("${prefix}/stage/${userName}/${userId}/${fileName}"))

    suite.getLogger().info("${prefix}/stage/${userName}/${userId}/${fileName}, objectListing:${objectListing.getObjectSummaries()}".toString())
    if (!objectListing.getObjectSummaries().isEmpty()) {
        return false;
    }

    return true;
}
+logger.info("Added 'checkRecycleInternalStage' function to Suite")
+
// Verify stage recycling: every name in nonExistFileNames must be gone from
// the current JDBC user's internal stage, and every name in existFileNames
// must still be present. Returns true only when both conditions hold.
Suite.metaClass.checkRecycleExpiredStageObjects = { String token, String instanceId, String cloudUniqueId, Set<String> nonExistFileNames, Set<String> existFileNames ->
    Suite suite = delegate as Suite
    suite.getLogger().info("""Test plugin: suiteName: ${suite.name}, instanceId: ${instanceId}, token:${token}, cloudUniqueId:${cloudUniqueId}""".toString())

    def getObjStoreInfoApiResult = suite.getObjStoreInfo(token, cloudUniqueId);
    suite.getLogger().info("checkRecycleExpiredStageObjects(): getObjStoreInfoApiResult:${getObjStoreInfoApiResult}".toString())

    String ak = getObjStoreInfoApiResult.result.obj_info[0].ak
    String sk = getObjStoreInfoApiResult.result.obj_info[0].sk
    String endpoint = getObjStoreInfoApiResult.result.obj_info[0].endpoint
    String region = getObjStoreInfoApiResult.result.obj_info[0].region
    String prefix = getObjStoreInfoApiResult.result.obj_info[0].prefix
    String bucket = getObjStoreInfoApiResult.result.obj_info[0].bucket
    // security fix: never log the secret key (sk)
    suite.getLogger().info("ak:${ak}, endpoint:${endpoint}, prefix:${prefix}".toString())

    def credentials = new BasicAWSCredentials(ak, sk)
    def endpointConfiguration = new EndpointConfiguration(endpoint, region)
    def s3Client = AmazonS3ClientBuilder.standard().withEndpointConfiguration(endpointConfiguration)
            .withCredentials(new AWSStaticCredentialsProvider(credentials)).build()

    // for root and admin, userId equal userName
    String userName = suite.context.config.jdbcUser;
    String userId = suite.context.config.jdbcUser;
    // NOTE(review): this listing is not paginated; with more than one page of
    // stage objects the check could miss keys — confirm expected data volume
    def objectListing = s3Client.listObjects(
        new ListObjectsRequest()
            .withBucketName(bucket)
            .withPrefix("${prefix}/stage/${userName}/${userId}/"))

    suite.getLogger().info("${prefix}/stage/${userName}/${userId}/, objectListing:${objectListing.getObjectSummaries()}".toString())
    // collect the bare file names present under the stage prefix
    Set<String> fileNames = new HashSet<>()
    for (def os: objectListing.getObjectSummaries()) {
        def split = os.key.split("/")
        if (split.length <= 0 ) {
            continue
        }
        fileNames.add(split[split.length-1])
    }
    for(def f : nonExistFileNames) {
        if (fileNames.contains(f)) {
            return false
        }
    }
    for(def f : existFileNames) {
        if (!fileNames.contains(f)) {
            return false
        }
    }
    return true
}
+logger.info("Added 'checkRecycleExpiredStageObjects' function to Suite")
diff --git a/regression-test/plugins/cloud_resource_plugin.groovy
b/regression-test/plugins/cloud_resource_plugin.groovy
new file mode 100644
index 00000000000..98d6eee6e59
--- /dev/null
+++ b/regression-test/plugins/cloud_resource_plugin.groovy
@@ -0,0 +1,76 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import groovy.json.JsonOutput
+import org.apache.doris.regression.suite.Suite
+
+/*
+ curl
"127.0.0.1:5001/MetaService/http/get_obj_store_info?token=greedisgood9999" -d
'{"cloud_unique_id": "pipeline_tiny_be"}'
+
+ {
+ "code": "OK",
+ "msg": "",
+ "result": {
+ "obj_info": [
+ {
+ "ctime": "1666080836",
+ "mtime": "1666080836",
+ "id": "1",
+ "ak": "xxxxxx",
+ "sk": "xxxxxxxxx",
+ "bucket": "xxxxxxxxx",
+ "prefix": "pipeline-pipeline-prefix",
+ "endpoint": "cos.ap-beijing.myqcloud.com",
+ "region": "ap-beijing",
+ "provider": "COS"
+ }
+ ]
+ }
+ }
+*/
// Fetches the object-store connection info (ak/sk/endpoint/region/bucket/prefix,
// see the sample curl response above) that the meta service holds for the given
// cloud_unique_id. Asserts the meta-service response code is "OK" and returns
// the parsed JSON result.
Suite.metaClass.getObjStoreInfo = { String token, String cloudUniqueId ->

    // which suite invoke current function?
    Suite suite = delegate as Suite

    // function body
    suite.getLogger().info("Test plugin: suiteName: ${suite.name}, token:${token}, cloudUniqueId: ${cloudUniqueId}".toString())

    def getObjStoreInfoApiBody = [cloud_unique_id:"${cloudUniqueId}"]
    def jsonOutput = new JsonOutput()
    def getObjStoreInfoApiBodyJson = jsonOutput.toJson(getObjStoreInfoApiBody)
    // Thin wrapper over the regression framework's httpTest DSL; checkFunc
    // receives (respCode, body).
    def getObjStoreInfoApi = { requestBody, checkFunc ->
        httpTest {
            endpoint suite.context.config.metaServiceHttpAddress
            uri "/MetaService/http/get_obj_store_info?token=$token"
            body requestBody
            check checkFunc
        }
    }

    def getObjStoreInfoApiResult = null
    getObjStoreInfoApi.call(getObjStoreInfoApiBodyJson) {
        respCode, body ->
            log.info("http cli result: ${body} ${respCode}".toString())
            getObjStoreInfoApiResult = parseJson(body)
            // Fail the suite immediately if the meta service rejects the request.
            assertTrue(getObjStoreInfoApiResult.code.equalsIgnoreCase("OK"))
    }

    suite.getLogger().info("getObjStoreInfoApiResult:${getObjStoreInfoApiResult}".toString())
    return getObjStoreInfoApiResult
}

logger.info("Added 'getObjStoreInfo' function to Suite")
+
diff --git a/regression-test/plugins/cloud_smoke_plugin.groovy
b/regression-test/plugins/cloud_smoke_plugin.groovy
new file mode 100644
index 00000000000..467b2a5a2c3
--- /dev/null
+++ b/regression-test/plugins/cloud_smoke_plugin.groovy
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import groovy.json.JsonOutput
+import org.apache.doris.regression.suite.Suite
+
// Compares two dotted version strings (e.g. "3.0.1" vs "3.0").
// Returns 1 if v1 > v2, -1 if v1 < v2, 0 if they are equal.
// A version that runs out of segments first compares as smaller,
// so "3.0" < "3.0.1". Segments are parsed as integers.
Suite.metaClass.compareCloudVersion = { v1, v2 ->
    def left = v1.split("\\.")
    def right = v2.split("\\.")
    int longest = Math.max(left.length, right.length)

    for (int idx = 0; idx < longest; ++idx) {
        // The shorter version exhausted its segments first -> it is smaller.
        if (idx >= left.length) {
            return -1
        }
        if (idx >= right.length) {
            return 1
        }
        int cmp = Integer.compare(left[idx].toInteger(), right[idx].toInteger())
        if (cmp != 0) {
            return cmp
        }
    }
    return 0
}

logger.info("Added 'cloud smoke' function to Suite")
+
diff --git a/regression-test/plugins/cloud_smooth_upgrade.groovy
b/regression-test/plugins/cloud_smooth_upgrade.groovy
new file mode 100644
index 00000000000..8e63389aa6e
--- /dev/null
+++ b/regression-test/plugins/cloud_smooth_upgrade.groovy
@@ -0,0 +1,65 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import groovy.json.JsonOutput
+import org.apache.doris.regression.suite.Suite
+
// Registers a BE node with the meta service for a smooth (in-place) upgrade.
// Sends an add_node request with is_smooth_upgrade=true so the new node joins
// the given compute cluster without a full cluster restart.
//
// Params:
//   be_unique_id - cloud_unique_id of the BE being added
//   ip           - BE host ip
//   port         - BE heartbeat port
//   cluster_name - target compute cluster name
//   cluster_id   - target compute cluster id
//
// The call is idempotent: both "OK" and "ALREADY_EXISTED" responses are
// accepted, so re-running an upgrade case is safe.
Suite.metaClass.addNodeForSmoothUpgrade = { String be_unique_id, String ip, String port, String cluster_name, String cluster_id ->

    // which suite invoke current function?
    Suite suite = delegate as Suite

    // Resolve config explicitly through the suite (consistent with the other
    // cloud plugins in this directory, which use suite.context.config).
    def token = suite.context.config.metaServiceToken
    def instance_id = suite.context.config.multiClusterInstance

    // function body
    def jsonOutput = new JsonOutput()
    def clusterInfo = [
        type: "COMPUTE",
        cluster_name : cluster_name,
        cluster_id : cluster_id,
        nodes: [
            [
                cloud_unique_id: be_unique_id,
                ip: ip,
                heartbeat_port: port,
                is_smooth_upgrade: true
            ],
        ]
    ]
    def map = [instance_id: "${instance_id}", cluster: clusterInfo]
    def js = jsonOutput.toJson(map)
    log.info("add node req: ${js} ".toString())

    def add_cluster_api = { request_body, check_func ->
        httpTest {
            endpoint suite.context.config.metaServiceHttpAddress
            uri "/MetaService/http/add_node?token=${token}"
            body request_body
            check check_func
        }
    }

    add_cluster_api.call(js) {
        respCode, body ->
            log.info("add node resp: ${body} ${respCode}".toString())
            def json = parseJson(body)
            // ALREADY_EXISTED is fine: the node may have been registered by a
            // previous run of the same upgrade case.
            assertTrue(json.code.equalsIgnoreCase("OK") || json.code.equalsIgnoreCase("ALREADY_EXISTED"))
    }
}

logger.info("Added 'addNodeForSmoothUpgrade' function to Suite")
+
diff --git a/regression-test/plugins/cloud_stage_ram_plugin.groovy
b/regression-test/plugins/cloud_stage_ram_plugin.groovy
new file mode 100644
index 00000000000..c2bef010637
--- /dev/null
+++ b/regression-test/plugins/cloud_stage_ram_plugin.groovy
@@ -0,0 +1,424 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+import groovy.json.JsonOutput
+import org.apache.doris.regression.suite.Suite
+import org.codehaus.groovy.runtime.IOGroovyMethods
+import java.util.Base64
+
+import com.aliyuncs.DefaultAcsClient
+import com.aliyuncs.IAcsClient
+import com.aliyuncs.exceptions.ClientException
+import com.aliyuncs.profile.DefaultProfile
+import com.aliyuncs.ram.model.v20150501.*
+
+import com.tencentcloudapi.common.Credential
+import com.tencentcloudapi.cam.v20190116.CamClient
+
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider
+import software.amazon.awssdk.regions.Region
+import software.amazon.awssdk.services.iam.model.AttachRolePolicyRequest
+import software.amazon.awssdk.services.iam.model.DetachRolePolicyRequest
+import software.amazon.awssdk.services.iam.model.EntityAlreadyExistsException;
+
// Builds the RAM/IAM role name used by smoke tests for the given instance id,
// with underscores normalized to hyphens.
Suite.metaClass.getRoleName = { String instanceId ->
    String normalized = instanceId.replaceAll("_", "-")
    return "smoke-test-${normalized}".toString()
}
logger.info("Added 'getRoleName' function to Suite")
+
// External id used in assume-role trust conditions: the Base64 encoding of the
// UTF-8 bytes of the instance id.
Suite.metaClass.getExternalId = { String instanceId ->
    byte[] raw = instanceId.getBytes("UTF-8")
    return Base64.getEncoder().encodeToString(raw)
}
logger.info("Added 'getExternalId' function to Suite")
+
// Looks up the smoke-test role for instanceId on the given cloud provider.
// Return value per provider:
//   oss: role arn, or null if the role does not exist / lookup failed
//   cos: "" (empty) means the role already exists (the API returns no arn);
//        null means it does not exist / lookup failed
//   obs: (return agency id)
//   s3:  role arn, or null if the role does not exist / lookup failed
Suite.metaClass.getRole = { String provider, String region, String ak, String sk, String instanceId ->
    Suite suite = delegate as Suite
    suite.getLogger().info("cloud_stage_ram plugin: suiteName: ${suite.name}, getRole, provider: ${provider}, instance: ${instanceId}, region:${region}".toString())
    def roleName = getRoleName(instanceId)

    if (provider.equalsIgnoreCase("oss")) {
        DefaultProfile profile = DefaultProfile.getProfile(region, ak, sk);
        IAcsClient client = new DefaultAcsClient(profile);
        GetRoleRequest getRoleRequest = new GetRoleRequest();
        getRoleRequest.setRoleName(roleName)
        try {
            GetRoleResponse response = client.getAcsResponse(getRoleRequest)
            return response.getRole().getArn()
        } catch (Exception e) {
            // Treat any lookup failure (including "role not found") as absent.
            return null
        }
    } else if (provider.equalsIgnoreCase("cos")) {
        Credential cred = new Credential(ak, sk)
        CamClient client = new CamClient(cred, "")
        com.tencentcloudapi.cam.v20190116.models.GetRoleRequest getRoleRequest = new com.tencentcloudapi.cam.v20190116.models.GetRoleRequest()
        getRoleRequest.setRoleName(roleName)
        try {
            com.tencentcloudapi.cam.v20190116.models.GetRoleResponse getRoleResponse = client.GetRole(getRoleRequest)
            // ANNT: txcloud does not return arn
            return ""
        } catch (Exception e) {
            // Treat any lookup failure (including "role not found") as absent.
            return null
        }
    } else if (provider.equalsIgnoreCase("obs")) {
        // do nothing
        // NOTE(review): this branch falls through to the "Unsupported provider"
        // throw below even though the header comment lists obs — confirm
        // whether the obs lookup is intentionally unimplemented.
    } else if (provider.equalsIgnoreCase("s3")) {
        try {
            AwsBasicCredentials basicCredentials = AwsBasicCredentials.create(ak, sk)
            StaticCredentialsProvider scp = StaticCredentialsProvider.create(basicCredentials)
            software.amazon.awssdk.services.iam.IamClient iam = software.amazon.awssdk.services.iam.IamClient.builder()
                    .region(Region.of(region)).credentialsProvider(scp).build()
            software.amazon.awssdk.services.iam.model.GetRoleRequest roleRequest = software.amazon.awssdk.services.iam.model.GetRoleRequest.builder()
                    .roleName(roleName).build()
            software.amazon.awssdk.services.iam.model.GetRoleResponse response = iam.getRole(roleRequest)
            return response.role().arn()
        } catch (Exception e) {
            // Treat any lookup failure (including "role not found") as absent.
            return null
        }
    }
    throw new Exception("Unsupported provider [" + provider + "] for get role")
}
logger.info("Added 'getRole' function to Suite")
+
// Attaches the given policy to the given role on the given provider.
// s3: policyName is policy arn
// Already-attached policies are tolerated on every provider: oss filters the
// EntityAlreadyExists error code, cos does not error at all, s3 catches
// EntityAlreadyExistsException.
Suite.metaClass.attachPolicy = { String provider, String region, String ak, String sk, String roleName, String policyName ->
    Suite suite = delegate as Suite
    suite.getLogger().info("cloud_stage_ram plugin: suiteName: ${suite.name}, attachPolicy, provider: ${provider}, region: ${region}, roleName: ${roleName}, policyName: ${policyName}".toString())

    if (provider.equalsIgnoreCase("oss")) {
        DefaultProfile profile = DefaultProfile.getProfile(region, ak, sk)
        IAcsClient client = new DefaultAcsClient(profile)
        AttachPolicyToRoleRequest attachPolicyToRoleRequest = new AttachPolicyToRoleRequest();
        attachPolicyToRoleRequest.setPolicyType("Custom")
        attachPolicyToRoleRequest.setPolicyName(policyName)
        attachPolicyToRoleRequest.setRoleName(roleName)
        try {
            AttachPolicyToRoleResponse response = client.getAcsResponse(attachPolicyToRoleRequest)
        } catch (ClientException e) {
            // Only "already attached" is tolerated; anything else is rethrown.
            if (!e.getErrCode().equals("EntityAlreadyExists.Role.Policy")) {
                throw e
            }
        }
    } else if (provider.equalsIgnoreCase("cos")) {
        // cos does not throw exception if policy is already attached
        Credential cred = new Credential(ak, sk)
        CamClient client = new CamClient(cred, "")
        com.tencentcloudapi.cam.v20190116.models.AttachRolePolicyRequest attachRolePolicyRequest = new com.tencentcloudapi.cam.v20190116.models.AttachRolePolicyRequest()
        attachRolePolicyRequest.setAttachRoleName(roleName)
        attachRolePolicyRequest.setPolicyName(policyName)
        com.tencentcloudapi.cam.v20190116.models.AttachRolePolicyResponse attachRolePolicyResponse = client.AttachRolePolicy(attachRolePolicyRequest)
    } else if (provider.equalsIgnoreCase("s3")) {
        try {
            AwsBasicCredentials basicCredentials = AwsBasicCredentials.create(ak, sk)
            StaticCredentialsProvider scp = StaticCredentialsProvider.create(basicCredentials)
            software.amazon.awssdk.services.iam.IamClient iam = software.amazon.awssdk.services.iam.IamClient.builder()
                    .region(Region.of(region)).credentialsProvider(scp).build()
            AttachRolePolicyRequest attachRequest = AttachRolePolicyRequest.builder()
                    .roleName(roleName).policyArn(policyName).build()
            iam.attachRolePolicy(attachRequest)
        } catch (EntityAlreadyExistsException e) {
            // policy already attached, ignore
        }
    } else {
        throw new Exception("Unsupported provider [" + provider + "] for attach policy")
    }
}
logger.info("Added 'attachPolicy' function to Suite")
+
// Creates (or reuses) the smoke-test role for instanceId on the given provider
// and attaches the given policy to it.
//
// Params:
//   instanceRamUserId - account/uin that is trusted to assume the role
//   policy            - policy name (oss/cos) or policy arn (s3) to attach
//   userId            - account uin used to build the cos role arn
//
// Returns String[2]: {roleName, roleArn}. For cos the arn is synthesized as
// "qcs::cam::uin/<userId>:roleName/<roleName>" because the CAM API does not
// return one.
//
// Provider matching is case-insensitive for every branch, consistent with
// getRole/attachPolicy/detachPolicy/deleteRole in this plugin (previously the
// cos and s3 branches used case-sensitive equals()).
Suite.metaClass.createRole = { String provider, String region, String ak, String sk, String instanceRamUserId, String instanceId, String policy, String userId ->
    Suite suite = delegate as Suite
    suite.getLogger().info("cloud_stage_ram plugin: suiteName: ${suite.name}, createRole, provider: ${provider}, region: ${region}, instance: ${instanceId}, region: ${region}".toString())
    def roleName = getRoleName(instanceId)
    def roleDescription = "冒烟测试: " + instanceId
    def externalId = getExternalId(instanceId)

    if (provider.equalsIgnoreCase("oss")) {
        // check if role exists
        def arn = getRole(provider, region, ak, sk, instanceId)
        if (arn != null) {
            // attach policy (idempotent, see attachPolicy)
            attachPolicy(provider, region, ak, sk, roleName, policy)
            suite.getLogger().info("Role [${roleName}] already exist, arn [${arn}], skip create.")
            return new String[]{roleName, arn}
        }

        // role does not exist, create role
        DefaultProfile profile = DefaultProfile.getProfile(region, ak, sk)
        IAcsClient client = new DefaultAcsClient(profile)
        CreateRoleRequest request = new CreateRoleRequest()
        request.setRoleName(roleName)
        /**
         * {
         *     "Statement": [
         *         {
         *             "Action": "sts:AssumeRole",
         *             "Effect": "Allow",
         *             "Principal": {
         *                 "RAM": [
         *                     "acs:ram::1276155707910852:root"
         *                 ]
         *             }
         *         }
         *     ],
         *     "Version": "1"
         * }
         */
        request.setAssumeRolePolicyDocument("{\n" +
                "    \"Statement\": [\n" +
                "        {\n" +
                "            \"Action\": \"sts:AssumeRole\",\n" +
                "            \"Effect\": \"Allow\",\n" +
                "            \"Principal\": {\n" +
                "                \"RAM\": [\n" +
                "                    \"acs:ram::" + instanceRamUserId + ":root\"\n" +
                "                ]\n" +
                "            }\n" +
                "        }\n" +
                "    ],\n" +
                "    \"Version\": \"1\"\n" +
                "}");
        request.setDescription(roleDescription)
        // request.setMaxSessionDuration(3600)
        CreateRoleResponse response = client.getAcsResponse(request)

        // attach policy
        attachPolicy(provider, region, ak, sk, roleName, policy)
        return new String[]{roleName, response.getRole().getArn()}
    } else if (provider.equalsIgnoreCase("cos")) {
        // check if role exists (getRole returns "" for an existing cos role)
        def arn = getRole(provider, region, ak, sk, instanceId)
        if (arn != null) {
            arn = "qcs::cam::uin/" + userId + ":roleName/" + roleName
            suite.getLogger().info("Role [${roleName}] already exist, arn [${arn}], skip create.")
            // attach policy
            attachPolicy(provider, region, ak, sk, roleName, policy)
            return new String[]{roleName, arn}
        }

        Credential cred = new Credential(ak, sk)
        /*HttpProfile httpProfile = new HttpProfile()
        httpProfile.setEndpoint("cam.tencentcloudapi.com")
        ClientProfile clientProfile = new ClientProfile()
        clientProfile.setHttpProfile(httpProfile)
        CamClient client = new CamClient(cred, region, clientProfile)*/
        CamClient client = new CamClient(cred, "")
        com.tencentcloudapi.cam.v20190116.models.CreateRoleRequest req = new com.tencentcloudapi.cam.v20190116.models.CreateRoleRequest()
        req.setRoleName(roleName)
        /**
         * {
         *     "Statement": [
         *         {
         *             "Action": "name/sts:AssumeRole",
         *             "Effect": "Allow",
         *             "Principal": {
         *                 "qcs": [
         *                     "qcs::cam::uin/100029159234:root"
         *                 ]
         *             },
         *             "condition": {
         *                 "string_equal": {
         *                     "sts:external_id": "aW5zdGFuY2VfbWVpeWlfZGV2"
         *                 }
         *             }
         *         }
         *     ],
         *     "Version": "2.0"
         * }
         */
        String policyDocument = "{\n" +
                "    \"Statement\": [\n" +
                "        {\n" +
                "            \"Action\": \"name/sts:AssumeRole\",\n" +
                "            \"Effect\": \"Allow\",\n" +
                "            \"Principal\": {\n" +
                "                \"qcs\": [\n" +
                "                    \"qcs::cam::uin/" + instanceRamUserId + ":root\"\n" +
                "                ]\n" +
                "            },\n" +
                "            \"condition\": {\n" +
                "                \"string_equal\": {\n" +
                "                    \"sts:external_id\": \"" + externalId + "\"\n" +
                "                }\n" +
                "            }\n" +
                "        }\n" +
                "    ],\n" +
                "    \"Version\": \"2.0\"\n" +
                "}";
        req.setPolicyDocument(policyDocument)
        req.setDescription(roleDescription)
        com.tencentcloudapi.cam.v20190116.models.CreateRoleResponse resp = client.CreateRole(req)

        // attach policy
        attachPolicy(provider, region, ak, sk, roleName, policy)

        // ANNT: txcloud does not return arn
        return new String[]{roleName, "qcs::cam::uin/" + userId + ":roleName/" + roleName}
    } else if (provider.equalsIgnoreCase("s3")) {
        // s3 does not support chinese characters
        roleDescription = "smoke-test: " + instanceId
        // check if role exists
        def arn = getRole(provider, region, ak, sk, instanceId)
        if (arn != null) {
            // attach policy
            attachPolicy(provider, region, ak, sk, roleName, policy)
            suite.getLogger().info("Role [${roleName}] already exist, arn [${arn}], skip create.")
            return new String[]{roleName, arn}
        }

        // create role
        AwsBasicCredentials basicCredentials = AwsBasicCredentials.create(ak, sk)
        StaticCredentialsProvider scp = StaticCredentialsProvider.create(basicCredentials)
        software.amazon.awssdk.services.iam.IamClient iam = software.amazon.awssdk.services.iam.IamClient.builder()
                .region(Region.of(region)).credentialsProvider(scp).build()
        /**
         * {
         *     "Version": "2012-10-17",
         *     "Statement": [
         *         {
         *             "Effect": "Allow",
         *             "Principal": {
         *                 "AWS": "arn:aws:iam::757278738533:root"
         *             },
         *             "Action": "sts:AssumeRole",
         *             "Condition": {
         *                 "StringEquals": {
         *                     "sts:ExternalId": "aW5zdGFuY2VfbWVpeWlfZGV2"
         *                 }
         *             }
         *         }
         *     ]
         * }
         */
        String policyDocument = "{\n" +
                "    \"Version\": \"2012-10-17\",\n" +
                "    \"Statement\": [\n" +
                "        {\n" +
                "            \"Effect\": \"Allow\",\n" +
                "            \"Principal\": {\n" +
                "                \"AWS\": \"arn:aws:iam::" + instanceRamUserId + ":root\"\n" +
                "            },\n" +
                "            \"Action\": \"sts:AssumeRole\",\n" +
                "            \"Condition\": {\n" +
                "                \"StringEquals\": {\n" +
                "                    \"sts:ExternalId\": \"" + externalId + "\"\n" +
                "                }\n" +
                "            }\n" +
                "        }\n" +
                "    ]\n" +
                "}";
        software.amazon.awssdk.services.iam.model.CreateRoleRequest request = software.amazon.awssdk.services.iam.model.CreateRoleRequest.builder()
                .roleName(roleName).assumeRolePolicyDocument(policyDocument).description(roleDescription).build()
        software.amazon.awssdk.services.iam.model.CreateRoleResponse response = iam.createRole(request)

        // attach policy
        attachPolicy(provider, region, ak, sk, roleName, policy)
        return new String[]{roleName, response.role().arn()}
    }
    throw new Exception("Unsupported provider [" + provider + "] for create role")
}
logger.info("Added 'createRole' function to Suite")
+
// Detaches the given policy from the given role.
// cos and obs do not need to detach policy when delete role, so only oss and
// s3 are supported here; any other provider throws.
// s3: policyName is policy arn.
// Failures are logged as warnings, not rethrown (best-effort cleanup).
Suite.metaClass.detachPolicy = { String provider, String region, String ak, String sk, String roleName, String policyName ->
    Suite suite = delegate as Suite
    suite.getLogger().info("cloud_stage_ram plugin: suiteName: ${suite.name}, detachPolicy, provider: ${provider}, region: ${region}, roleName: ${roleName}".toString())

    if (provider.equalsIgnoreCase("oss")) {
        DetachPolicyFromRoleRequest detachPolicyFromRoleRequest = new DetachPolicyFromRoleRequest()
        detachPolicyFromRoleRequest.setPolicyType("Custom");
        detachPolicyFromRoleRequest.setPolicyName(policyName);
        detachPolicyFromRoleRequest.setRoleName(roleName);
        try {
            DefaultProfile profile = DefaultProfile.getProfile(region, ak, sk)
            IAcsClient client = new DefaultAcsClient(profile)
            DetachPolicyFromRoleResponse response = client.getAcsResponse(detachPolicyFromRoleRequest)
        } catch (Exception e) {
            suite.getLogger().warn("Failed detach policy for role [${roleName}] and policy [${policyName}], " + e.getMessage())
        }
    } else if (provider.equalsIgnoreCase("s3")) {
        try {
            AwsBasicCredentials basicCredentials = AwsBasicCredentials.create(ak, sk)
            StaticCredentialsProvider scp = StaticCredentialsProvider.create(basicCredentials)
            software.amazon.awssdk.services.iam.IamClient iam = software.amazon.awssdk.services.iam.IamClient.builder()
                    .region(Region.of(region)).credentialsProvider(scp).build()
            DetachRolePolicyRequest request = DetachRolePolicyRequest.builder()
                    .roleName(roleName).policyArn(policyName).build()
            iam.detachRolePolicy(request)
        } catch (Exception e) {
            suite.getLogger().warn("Failed detach policy for role [${roleName}] and policy [${policyName}], " + e.getMessage())
        }
    } else {
        // Fixed: message previously said "for attach policy" in this detach function.
        throw new Exception("Unsupported provider [" + provider + "] for detach policy")
    }
}
logger.info("Added 'detachPolicy' function to Suite")
+
// Deletes the smoke-test role for instanceId (best-effort; failures are logged
// as warnings, not rethrown).
// oss: detach policy firstly
// cos: does not need to detach policy when delete role
// obs:
// s3: detach policy firstly; policyName is policy arn;
Suite.metaClass.deleteRole = { String provider, String region, String ak, String sk, String instanceId, String policyName ->
    Suite suite = delegate as Suite
    suite.getLogger().info("cloud_stage_ram plugin: suiteName: ${suite.name}, deleteRole, provider: ${provider}, region: ${region}, instanceId: ${instanceId}".toString())
    def roleName = getRoleName(instanceId)

    // oss and obs roles are intentionally not deleted here.
    if (provider.equalsIgnoreCase("oss") || provider.equalsIgnoreCase("obs")) {
        return
    }

    if(provider.equalsIgnoreCase("cos")) {
        // cos does not need to detach policy when delete role
        com.tencentcloudapi.cam.v20190116.models.DeleteRoleRequest deleteRoleRequest = new com.tencentcloudapi.cam.v20190116.models.DeleteRoleRequest()
        deleteRoleRequest.setRoleName(roleName)
        try {
            Credential cred = new Credential(ak, sk)
            CamClient client = new CamClient(cred, "")
            com.tencentcloudapi.cam.v20190116.models.DeleteRoleResponse deleteRoleResponse = client.DeleteRole(deleteRoleRequest)
        } catch (Exception e) {
            suite.getLogger().warn("Failed delete role for role [${roleName}], " + e.getMessage())
        }
    } else if(provider.equalsIgnoreCase("s3")) {
        // s3 requires the policy to be detached before the role can be deleted.
        detachPolicy(provider, region, ak, sk, roleName, policyName)
        try {
            AwsBasicCredentials basicCredentials = AwsBasicCredentials.create(ak, sk)
            StaticCredentialsProvider scp = StaticCredentialsProvider.create(basicCredentials)
            software.amazon.awssdk.services.iam.IamClient iam = software.amazon.awssdk.services.iam.IamClient.builder()
                    .region(Region.of(region)).credentialsProvider(scp).build()
            software.amazon.awssdk.services.iam.model.DeleteRoleRequest request = software.amazon.awssdk.services.iam.model.DeleteRoleRequest.builder().roleName(roleName).build();
            software.amazon.awssdk.services.iam.model.DeleteRoleResponse response = iam.deleteRole(request);
        } catch (Exception e) {
            suite.getLogger().warn("Failed delete role for role [${roleName}], " + e.getMessage())
        }
    } else if (provider.equalsIgnoreCase("oss")) { // useful when oss support external id
        // NOTE(review): this branch is currently unreachable — oss already
        // returned at the top of the function. Kept for the future case where
        // oss supports external id, per the inline comment above.
        detachPolicy(provider, region, ak, sk, roleName, policyName)
        DeleteRoleRequest deleteRoleRequest = new DeleteRoleRequest()
        deleteRoleRequest.setRoleName(roleName)
        try {
            DefaultProfile profile = DefaultProfile.getProfile(region, ak, sk)
            IAcsClient client = new DefaultAcsClient(profile)
            DeleteRoleResponse response = client.getAcsResponse(deleteRoleRequest)
        } catch (Exception e) {
            suite.getLogger().warn("Failed delete role for role [${roleName}], " + e.getMessage())
        }
    } else {
        throw new Exception("Unsupported provider [" + provider + "] for delete role")
    }
}
logger.info("Added 'deleteRole' function to Suite")
\ No newline at end of file
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]