This is an automated email from the ASF dual-hosted git repository. areusch pushed a commit to branch areusch/freeze-dependencies in repository https://gitbox.apache.org/repos/asf/tvm.git
commit b39a6285e5ac74bd03319a4b9726c8147702bbe0 Author: Andrew Reusch <[email protected]> AuthorDate: Thu May 19 15:26:47 2022 -0700 Patch publish Jenkinsfiles PR. --- Jenkinsfile | 505 ++++++++++++++++++++------------- jenkins/Jenkinsfile.j2 | 383 +++++++++++++++---------- jenkins/macros.j2 | 9 +- tests/python/ci/test_ci.py | 95 ++++++- tests/scripts/cmd_utils.py | 21 +- tests/scripts/git_utils.py | 1 + tests/scripts/http_utils.py | 34 +++ tests/scripts/should_rebuild_docker.py | 154 ++++++++++ 8 files changed, 834 insertions(+), 368 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 7b8c8f890d..03eb1db58b 100755 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -45,7 +45,7 @@ // 'python3 jenkins/generate.py' // Note: This timestamp is here to ensure that updates to the Jenkinsfile are // always rebased on main before merging: -// Generated at 2022-05-20T18:06:10.772162 +// Generated at 2022-05-20T12:01:36.490148 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils // NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. 
--> @@ -193,63 +193,22 @@ if (currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) { cancel_previous_build() -def lint() { -stage('Lint') { - parallel( +def run_lint() { + stage('Lint') { + parallel( 'Lint 1 of 2': { node('CPU-SMALL') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/lint") { init_git() + docker_init(ci_lint) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'TVM_NUM_SHARDS=2', 'TVM_SHARD_INDEX=0'], { - ci_arm = params.ci_arm_param ?: ci_arm - ci_cpu = params.ci_cpu_param ?: ci_cpu - ci_gpu = params.ci_gpu_param ?: ci_gpu - ci_hexagon = params.ci_hexagon_param ?: ci_hexagon - ci_i386 = params.ci_i386_param ?: ci_i386 - ci_lint = params.ci_lint_param ?: ci_lint - ci_qemu = params.ci_qemu_param ?: ci_qemu - ci_wasm = params.ci_wasm_param ?: ci_wasm - - sh (script: """ - echo "Docker images being used in this build:" - echo " ci_arm = ${ci_arm}" - echo " ci_cpu = ${ci_cpu}" - echo " ci_gpu = ${ci_gpu}" - echo " ci_hexagon = ${ci_hexagon}" - echo " ci_i386 = ${ci_i386}" - echo " ci_lint = ${ci_lint}" - echo " ci_qemu = ${ci_qemu}" - echo " ci_wasm = ${ci_wasm}" - """, label: 'Docker image names') - - is_docs_only_build = sh ( - returnStatus: true, - script: './tests/scripts/git_change_docs.sh', - label: 'Check for docs only changes', - ) - skip_ci = should_skip_ci(env.CHANGE_ID) - skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) - rebuild_docker_images = sh ( - returnStatus: true, - script: './tests/scripts/git_change_docker.sh', - label: 'Check for any docker changes', - ) - if (skip_ci) { - // Don't rebuild when skipping CI - rebuild_docker_images = false - } - if (rebuild_docker_images) { - // Exit before linting so we can use the newly created Docker images - // to run the lint - return - } sh ( - script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", - label: 'Run lint', - ) + script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", + label: 'Run lint', + ) }) } } @@ -259,80 +218,120 @@ stage('Lint') 
{ node('CPU-SMALL') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/lint") { init_git() + docker_init(ci_lint) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'TVM_NUM_SHARDS=2', 'TVM_SHARD_INDEX=1'], { - ci_arm = params.ci_arm_param ?: ci_arm - ci_cpu = params.ci_cpu_param ?: ci_cpu - ci_gpu = params.ci_gpu_param ?: ci_gpu - ci_hexagon = params.ci_hexagon_param ?: ci_hexagon - ci_i386 = params.ci_i386_param ?: ci_i386 - ci_lint = params.ci_lint_param ?: ci_lint - ci_qemu = params.ci_qemu_param ?: ci_qemu - ci_wasm = params.ci_wasm_param ?: ci_wasm - - sh (script: """ - echo "Docker images being used in this build:" - echo " ci_arm = ${ci_arm}" - echo " ci_cpu = ${ci_cpu}" - echo " ci_gpu = ${ci_gpu}" - echo " ci_hexagon = ${ci_hexagon}" - echo " ci_i386 = ${ci_i386}" - echo " ci_lint = ${ci_lint}" - echo " ci_qemu = ${ci_qemu}" - echo " ci_wasm = ${ci_wasm}" - """, label: 'Docker image names') - - is_docs_only_build = sh ( - returnStatus: true, - script: './tests/scripts/git_change_docs.sh', - label: 'Check for docs only changes', - ) - skip_ci = should_skip_ci(env.CHANGE_ID) - skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) - rebuild_docker_images = sh ( - returnStatus: true, - script: './tests/scripts/git_change_docker.sh', - label: 'Check for any docker changes', - ) - if (skip_ci) { - // Don't rebuild when skipping CI - rebuild_docker_images = false - } - if (rebuild_docker_images) { - // Exit before linting so we can use the newly created Docker images - // to run the lint - return - } sh ( - script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", - label: 'Run lint', - ) + script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", + label: 'Run lint', + ) }) } } } }, - ) + ) + } } + +def prepare() { + stage('Prepare') { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") { + init_git() + ci_arm = params.ci_arm_param ?: ci_arm + ci_cpu = params.ci_cpu_param ?: ci_cpu + ci_gpu = params.ci_gpu_param ?: 
ci_gpu + ci_hexagon = params.ci_hexagon_param ?: ci_hexagon + ci_i386 = params.ci_i386_param ?: ci_i386 + ci_lint = params.ci_lint_param ?: ci_lint + ci_qemu = params.ci_qemu_param ?: ci_qemu + ci_wasm = params.ci_wasm_param ?: ci_wasm + + sh (script: """ + echo "Docker images being used in this build:" + echo " ci_arm = ${ci_arm}" + echo " ci_cpu = ${ci_cpu}" + echo " ci_gpu = ${ci_gpu}" + echo " ci_hexagon = ${ci_hexagon}" + echo " ci_i386 = ${ci_i386}" + echo " ci_lint = ${ci_lint}" + echo " ci_qemu = ${ci_qemu}" + echo " ci_wasm = ${ci_wasm}" + """, label: 'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: './tests/scripts/git_change_docs.sh', + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: './tests/scripts/should_rebuild_docker.py', + label: 'Check for any docker changes', + ) + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } + } + } + } } // [note: method size] // This has to be extracted into a method due to JVM limitations on the size of // a method (so the code can't all be inlined) -lint() +prepare() -def build_image(image_name) { - hash = sh( +def ecr_push(full_name) { + aws_account_id = sh( returnStdout: true, - script: 'git log -1 --format=\'%h\'' + script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"', + label: 'Get AWS ID' ).trim() - def full_name = "${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}" - sh( - script: "${docker_build} ${image_name} --spec ${full_name}", - label: 'Build docker image' - ) + + def ecr_name = "${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com/${full_name}" + try { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION=us-west-2', + "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) { + sh( + script: ''' + set -eux + aws ecr 
get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO + ''', + label: 'Log in to ECR' + ) + sh( + script: """ + set -x + docker tag ${full_name} \$AWS_ECR_REPO/${full_name} + docker push \$AWS_ECR_REPO/${full_name} + """, + label: 'Upload image to ECR' + ) + } + } finally { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION=us-west-2', + "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) { + sh( + script: 'docker logout $AWS_ECR_REPO', + label: 'Clean up login credentials' + ) + } + } + return ecr_name +} + +def ecr_pull(full_name) { aws_account_id = sh( returnStdout: true, script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"', @@ -340,125 +339,127 @@ def build_image(image_name) { ).trim() try { - // Use a credential so Jenkins knows to scrub the AWS account ID which is nice - // (but so we don't have to rely it being hardcoded in Jenkins) - withCredentials([string( - credentialsId: 'aws-account-id', - variable: '_ACCOUNT_ID_DO_NOT_USE', - )]) { - withEnv([ - "AWS_ACCOUNT_ID=${aws_account_id}", - 'AWS_DEFAULT_REGION=us-west-2']) { - sh( - script: ''' - set -x - aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com - ''', - label: 'Log in to ECR' - ) - sh( - script: """ - set -x - docker tag ${full_name} \$AWS_ACCOUNT_ID.dkr.ecr.\$AWS_DEFAULT_REGION.amazonaws.com/${full_name} - docker push \$AWS_ACCOUNT_ID.dkr.ecr.\$AWS_DEFAULT_REGION.amazonaws.com/${full_name} - """, - label: 'Upload image to ECR' - ) - } + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION=us-west-2', + "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) { + sh( + script: ''' + set -eux + aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO + ''', + label: 'Log in to ECR' + ) + sh( + script: 
""" + set -eux + docker pull ${full_name} + """, + label: 'Pull image from ECR' + ) } } finally { - sh( - script: 'rm -f ~/.docker/config.json', - label: 'Clean up login credentials' - ) + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION=us-west-2', + "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) { + sh( + script: 'docker logout $AWS_ECR_REPO', + label: 'Clean up login credentials' + ) + } } +} + + +def build_image(image_name) { + hash = sh( + returnStdout: true, + script: 'git log -1 --format=\'%h\'' + ).trim() + def full_name = "${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}" sh( - script: "docker rmi ${full_name}", - label: 'Remove docker image' + script: "${docker_build} ${image_name} --spec ${full_name}", + label: 'Build docker image' ) + return ecr_push(full_name) } if (rebuild_docker_images) { stage('Docker Image Build') { // TODO in a follow up PR: Find ecr tag and use in subsequent builds - parallel 'ci-lint': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_lint') + parallel( + 'ci_arm': { + node('ARM') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + ci_arm = build_image('ci_arm') + } } - } - }, 'ci-cpu': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_cpu') + }, + 'ci_cpu': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + ci_cpu = build_image('ci_cpu') + } } - } - }, 'ci-gpu': { - node('GPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_gpu') + }, + 'ci_gpu': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + ci_gpu = build_image('ci_gpu') + } } - } - }, 'ci-qemu': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_qemu') + }, + 'ci_hexagon': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + ci_hexagon = build_image('ci_hexagon') + } } - } 
- }, 'ci-i386': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_i386') + }, + 'ci_i386': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + ci_i386 = build_image('ci_i386') + } } - } - }, 'ci-arm': { - node('ARM') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_arm') + }, + 'ci_lint': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + ci_lint = build_image('ci_lint') + } } - } - }, 'ci-wasm': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_wasm') + }, + 'ci_qemu': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + ci_qemu = build_image('ci_qemu') + } } - } - }, 'ci-hexagon': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_hexagon') + }, + 'ci_wasm': { + node('CPU') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + ci_wasm = build_image('ci_wasm') + } } - } - } + }, + ) } - // // TODO: Once we are able to use the built images, enable this step - // // If the docker images changed, we need to run the image build before the lint - // // can run since it requires a base docker image. Most of the time the images - // // aren't build though so it's faster to use the same node that checks for - // // docker changes to run the lint in the usual case. - // stage('Sanity Check (re-run)') { - // timeout(time: max_time, unit: 'MINUTES') { - // node('CPU') { - // ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/sanity") { - // init_git() - // sh ( - // script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", - // label: 'Run lint', - // ) - // } - // } - // } - // } } +// Run the lint with the new Docker image before continuing to builds +run_lint() + // Run make. First try to do an incremental make from a previous workspace in hope to // accelerate the compilation. 
If something is wrong, clean the workspace and then // build from scratch. @@ -530,6 +531,19 @@ def add_microtvm_permissions() { } +def docker_init(image) { + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + ecr_pull(image) + } else { + sh( + script: "docker pull ${image}", + label: 'Pull docker image', + ) + } +} + def build() { stage('Build') { environment { @@ -541,6 +555,7 @@ stage('Build') { node('CPU-SMALL') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-gpu") { init_git() + docker_init(ci_gpu) sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh build" make("${ci_gpu} --no-gpu", 'build', '-j2') sh( @@ -587,6 +602,7 @@ stage('Build') { node('CPU-SMALL') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-cpu") { init_git() + docker_init(ci_cpu) sh ( script: "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh build", label: 'Create CPU cmake config', @@ -626,6 +642,7 @@ stage('Build') { node('CPU-SMALL') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-wasm") { init_git() + docker_init(ci_wasm) sh ( script: "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh build", label: 'Create WASM cmake config', @@ -650,6 +667,7 @@ stage('Build') { node('CPU-SMALL') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-i386") { init_git() + docker_init(ci_i386) sh ( script: "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh build", label: 'Create i386 cmake config', @@ -683,6 +701,7 @@ stage('Build') { node('ARM') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-arm") { init_git() + docker_init(ci_arm) sh ( script: "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh build", label: 'Create ARM cmake config', @@ -714,6 +733,7 @@ stage('Build') { node('CPU-SMALL') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-qemu") { init_git() + docker_init(ci_qemu) sh ( script: 
"${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh build", label: 'Create QEMU cmake config', @@ -744,6 +764,7 @@ stage('Build') { node('CPU-SMALL') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-hexagon") { init_git() + docker_init(ci_hexagon) sh ( script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_config_build_hexagon.sh build", label: 'Create Hexagon cmake config', @@ -787,6 +808,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") { try { init_git() + docker_init(ci_gpu) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=gpu', @@ -851,6 +873,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") { try { init_git() + docker_init(ci_gpu) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=gpu', @@ -901,6 +924,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { try { init_git() + docker_init(ci_cpu) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=cpu', @@ -945,6 +969,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") { try { init_git() + docker_init(ci_cpu) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=cpu', @@ -990,6 +1015,7 @@ stage('Test') { timeout(time: max_time, unit: 'MINUTES') { try { init_git() + docker_init(ci_cpu) withEnv(['PLATFORM=cpu'], { sh( script: """ @@ -1033,6 +1059,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { try { init_git() + docker_init(ci_i386) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=i386', @@ -1078,6 +1105,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { try { init_git() + docker_init(ci_i386) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=i386', @@ -1122,6 +1150,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") { try { init_git() + 
docker_init(ci_i386) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=i386', @@ -1166,6 +1195,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { try { init_git() + docker_init(ci_hexagon) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=hexagon', @@ -1211,6 +1241,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { try { init_git() + docker_init(ci_hexagon) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=hexagon', @@ -1255,6 +1286,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { try { init_git() + docker_init(ci_hexagon) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=hexagon', @@ -1299,6 +1331,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") { try { init_git() + docker_init(ci_hexagon) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=hexagon', @@ -1344,6 +1377,7 @@ stage('Test') { timeout(time: max_time, unit: 'MINUTES') { try { init_git() + docker_init(ci_qemu) withEnv(['PLATFORM=qemu'], { sh( script: """ @@ -1388,6 +1422,7 @@ stage('Test') { timeout(time: max_time, unit: 'MINUTES') { try { init_git() + docker_init(ci_arm) withEnv(['PLATFORM=arm'], { sh( script: """ @@ -1431,6 +1466,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { try { init_git() + docker_init(ci_arm) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=arm', @@ -1474,6 +1510,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") { try { init_git() + docker_init(ci_arm) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=arm', @@ -1517,6 +1554,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") { try { init_git() + docker_init(ci_gpu) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=gpu', @@ -1559,6 +1597,7 @@ stage('Test') { 
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") { try { init_git() + docker_init(ci_gpu) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=gpu', @@ -1601,6 +1640,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { try { init_git() + docker_init(ci_gpu) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=gpu', @@ -1643,6 +1683,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { try { init_git() + docker_init(ci_gpu) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=gpu', @@ -1685,6 +1726,7 @@ stage('Test') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") { try { init_git() + docker_init(ci_gpu) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM=gpu', @@ -1728,6 +1770,7 @@ stage('Test') { timeout(time: max_time, unit: 'MINUTES') { try { init_git() + docker_init(ci_cpu) withEnv(['PLATFORM=cpu'], { sh( script: """ @@ -1767,6 +1810,7 @@ stage('Test') { timeout(time: max_time, unit: 'MINUTES') { try { init_git() + docker_init(ci_arm) withEnv(['PLATFORM=arm'], { sh( script: """ @@ -1804,6 +1848,7 @@ stage('Test') { node('GPU') { ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/docs-python-gpu") { init_git() + docker_init(ci_gpu) sh( script: """ set -eux @@ -1907,6 +1952,25 @@ def deploy_docs() { } } + +def update_docker(ecr_image, hub_image) { + if (!ecr_image.contains("amazonaws.com")) { + sh("echo Skipping '${ecr_image}' since it doesn't look like an ECR image") + return + } + sh( + script: """ + set -eux + docker pull ${ecr_image} + docker tag \ + ${ecr_image} \ + ${hub_image} + docker push ${hub_image} + """, + label: "Update ${hub_image} on Docker Hub", + ) +} + stage('Deploy') { if (env.BRANCH_NAME == 'main' && env.DOCS_DEPLOY_ENABLED == 'yes') { node('CPU') { @@ -1924,4 +1988,41 @@ stage('Deploy') { } } } + // if (env.BRANCH_NAME == 'main' && rebuild_docker_images) { + if (env.BRANCH_NAME == 'PR-11329' && 
rebuild_docker_images && upstream_revision != null) { + node('CPU') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/deploy-docker") { + try { + withCredentials([string( + credentialsId: 'dockerhub-tlcpackstaging-key', + variable: 'DOCKERHUB_KEY', + )]) { + sh( + script: 'docker login -u tlcpackstaging -p ${DOCKERHUB_KEY}', + label: 'Log in to Docker Hub', + ) + } + def date_Ymd_HMS = sh( + script: 'python -c \'import datetime; print(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))\'', + label: 'Determine date', + returnStdout: true, + ).trim() + def tag = "${date_Ymd_HMS}-${upstream_revision.substring(0, 8)}" + update_docker(ci_arm, "tlcpackstaging/test_ci_arm:${tag}") + update_docker(ci_cpu, "tlcpackstaging/test_ci_cpu:${tag}") + update_docker(ci_gpu, "tlcpackstaging/test_ci_gpu:${tag}") + update_docker(ci_hexagon, "tlcpackstaging/test_ci_hexagon:${tag}") + update_docker(ci_i386, "tlcpackstaging/test_ci_i386:${tag}") + update_docker(ci_lint, "tlcpackstaging/test_ci_lint:${tag}") + update_docker(ci_qemu, "tlcpackstaging/test_ci_qemu:${tag}") + update_docker(ci_wasm, "tlcpackstaging/test_ci_wasm:${tag}") + } finally { + sh( + script: 'docker logout', + label: 'Clean up login credentials' + ) + } + } + } + } } diff --git a/jenkins/Jenkinsfile.j2 b/jenkins/Jenkinsfile.j2 index b00ee02726..0883e478b4 100644 --- a/jenkins/Jenkinsfile.j2 +++ b/jenkins/Jenkinsfile.j2 @@ -82,6 +82,8 @@ docker_build = 'docker/build.sh' // timeout in minutes max_time = 180 rebuild_docker_images = false +{% set aws_default_region = "us-west-2" %} +{% set aws_ecr_url = "dkr.ecr." 
+ aws_default_region + ".amazonaws.com" %} def per_exec_ws(folder) { return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder @@ -190,66 +192,111 @@ if (currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) { cancel_previous_build() -def lint() { -stage('Lint') { - parallel( - {% call m.sharded_lint_step(name='Lint', num_shards=2, node='CPU-SMALL', ws='tvm/lint') %} - {% for image in images %} - {{ image.name }} = params.{{ image.name }}_param ?: {{ image.name }} - {% endfor %} +def run_lint() { + stage('Lint') { + parallel( + {% call m.sharded_lint_step( + name='Lint', + num_shards=2, + node='CPU-SMALL', + ws='tvm/lint', + docker_image="ci_lint") + %} + sh ( + script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", + label: 'Run lint', + ) + {% endcall %} + ) + } +} - sh (script: """ - echo "Docker images being used in this build:" +def prepare() { + stage('Prepare') { + node('CPU-SMALL') { + ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") { + init_git() {% for image in images %} - echo " {{ image.name }} = ${ {{- image.name -}} }" + {{ image.name }} = params.{{ image.name }}_param ?: {{ image.name }} {% endfor %} - """, label: 'Docker image names') - is_docs_only_build = sh ( - returnStatus: true, - script: './tests/scripts/git_change_docs.sh', - label: 'Check for docs only changes', - ) - skip_ci = should_skip_ci(env.CHANGE_ID) - skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) - rebuild_docker_images = sh ( - returnStatus: true, - script: './tests/scripts/git_change_docker.sh', - label: 'Check for any docker changes', - ) - if (skip_ci) { - // Don't rebuild when skipping CI - rebuild_docker_images = false - } - if (rebuild_docker_images) { - // Exit before linting so we can use the newly created Docker images - // to run the lint - return + sh (script: """ + echo "Docker images being used in this build:" + {% for image in images %} + echo " {{ image.name }} = ${ {{- image.name -}} }" + {% endfor %} + """, label: 
'Docker image names') + + is_docs_only_build = sh ( + returnStatus: true, + script: './tests/scripts/git_change_docs.sh', + label: 'Check for docs only changes', + ) + skip_ci = should_skip_ci(env.CHANGE_ID) + skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID) + rebuild_docker_images = sh ( + returnStatus: true, + script: './tests/scripts/should_rebuild_docker.py', + label: 'Check for any docker changes', + ) + if (skip_ci) { + // Don't rebuild when skipping CI + rebuild_docker_images = false + } } - sh ( - script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", - label: 'Run lint', - ) - {% endcall %} - ) -} + } + } } // [note: method size] // This has to be extracted into a method due to JVM limitations on the size of // a method (so the code can't all be inlined) -lint() +prepare() -def build_image(image_name) { - hash = sh( +def ecr_push(full_name) { + aws_account_id = sh( returnStdout: true, - script: 'git log -1 --format=\'%h\'' + script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"', + label: 'Get AWS ID' ).trim() - def full_name = "${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}" - sh( - script: "${docker_build} ${image_name} --spec ${full_name}", - label: 'Build docker image' - ) + + def ecr_name = "${aws_account_id}.{{ aws_ecr_url }}/${full_name}" + try { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION={{ aws_default_region }}', + "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) { + sh( + script: ''' + set -eux + aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO + ''', + label: 'Log in to ECR' + ) + sh( + script: """ + set -x + docker tag ${full_name} \$AWS_ECR_REPO/${full_name} + docker push \$AWS_ECR_REPO/${full_name} + """, + label: 'Upload image to ECR' + ) + } + } finally { + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION={{ aws_default_region }}', + "AWS_ECR_REPO=${aws_account_id}.{{ 
aws_ecr_url }}"]) { + sh( + script: 'docker logout $AWS_ECR_REPO', + label: 'Clean up login credentials' + ) + } + } + return ecr_name +} + +def ecr_pull(full_name) { aws_account_id = sh( returnStdout: true, script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"', @@ -257,125 +304,73 @@ def build_image(image_name) { ).trim() try { - // Use a credential so Jenkins knows to scrub the AWS account ID which is nice - // (but so we don't have to rely it being hardcoded in Jenkins) - withCredentials([string( - credentialsId: 'aws-account-id', - variable: '_ACCOUNT_ID_DO_NOT_USE', - )]) { - withEnv([ - "AWS_ACCOUNT_ID=${aws_account_id}", - 'AWS_DEFAULT_REGION=us-west-2']) { - sh( - script: ''' - set -x - aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com - ''', - label: 'Log in to ECR' - ) - sh( - script: """ - set -x - docker tag ${full_name} \$AWS_ACCOUNT_ID.dkr.ecr.\$AWS_DEFAULT_REGION.amazonaws.com/${full_name} - docker push \$AWS_ACCOUNT_ID.dkr.ecr.\$AWS_DEFAULT_REGION.amazonaws.com/${full_name} - """, - label: 'Upload image to ECR' - ) - } + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION={{ aws_default_region }}', + "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) { + sh( + script: ''' + set -eux + aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO + ''', + label: 'Log in to ECR' + ) + sh( + script: """ + set -eux + docker pull ${full_name} + """, + label: 'Pull image from ECR' + ) } } finally { - sh( - script: 'rm -f ~/.docker/config.json', - label: 'Clean up login credentials' - ) + withEnv([ + "AWS_ACCOUNT_ID=${aws_account_id}", + 'AWS_DEFAULT_REGION={{ aws_default_region }}', + "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) { + sh( + script: 'docker logout $AWS_ECR_REPO', + label: 'Clean up login credentials' + ) + } } +} + + +def 
build_image(image_name) { + hash = sh( + returnStdout: true, + script: 'git log -1 --format=\'%h\'' + ).trim() + def full_name = "${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}" sh( - script: "docker rmi ${full_name}", - label: 'Remove docker image' + script: "${docker_build} ${image_name} --spec ${full_name}", + label: 'Build docker image' ) + return ecr_push(full_name) } if (rebuild_docker_images) { stage('Docker Image Build') { // TODO in a follow up PR: Find ecr tag and use in subsequent builds - parallel 'ci-lint': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_lint') - } - } - }, 'ci-cpu': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_cpu') - } - } - }, 'ci-gpu': { - node('GPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_gpu') - } - } - }, 'ci-qemu': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_qemu') - } - } - }, 'ci-i386': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_i386') - } - } - }, 'ci-arm': { - node('ARM') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_arm') - } - } - }, 'ci-wasm': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_wasm') - } - } - }, 'ci-hexagon': { - node('CPU') { - timeout(time: max_time, unit: 'MINUTES') { - init_git() - build_image('ci_hexagon') + parallel( + {% for image in images %} + '{{ image.name }}': { + node('{{ image.platform }}') { + timeout(time: max_time, unit: 'MINUTES') { + init_git() + {{ image.name }} = build_image('{{ image.name }}') + } } - } - } + }, + {% endfor %} + ) } - // // TODO: Once we are able to use the built images, enable this step - // // If the docker images changed, we need to run the image build before the lint - // // can run since it requires a base docker image. 
Most of the time the images - // // aren't build though so it's faster to use the same node that checks for - // // docker changes to run the lint in the usual case. - // stage('Sanity Check (re-run)') { - // timeout(time: max_time, unit: 'MINUTES') { - // node('CPU') { - // ws({{ m.per_exec_ws('tvm/sanity') }}) { - // init_git() - // sh ( - // script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh", - // label: 'Run lint', - // ) - // } - // } - // } - // } } +// Run the lint with the new Docker image before continuing to builds +run_lint() + // Run make. First try to do an incremental make from a previous workspace in hope to // accelerate the compilation. If something is wrong, clean the workspace and then // build from scratch. @@ -454,6 +449,19 @@ def add_microtvm_permissions() { } +def docker_init(image) { + if (image.contains("amazonaws.com")) { + // If this string is in the image name it's from ECR and needs to be pulled + // with the right credentials + ecr_pull(image) + } else { + sh( + script: "docker pull ${image}", + label: 'Pull docker image', + ) + } +} + def build() { stage('Build') { environment { @@ -465,6 +473,7 @@ stage('Build') { node('CPU-SMALL') { ws({{ m.per_exec_ws('tvm/build-gpu') }}) { init_git() + docker_init(ci_gpu) sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh build" make("${ci_gpu} --no-gpu", 'build', '-j2') {{ m.upload_artifacts(tag='gpu', filenames=tvm_multilib, folders=microtvm_template_projects) }} @@ -482,6 +491,7 @@ stage('Build') { node('CPU-SMALL') { ws({{ m.per_exec_ws('tvm/build-cpu') }}) { init_git() + docker_init(ci_cpu) sh ( script: "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh build", label: 'Create CPU cmake config', @@ -505,6 +515,7 @@ stage('Build') { node('CPU-SMALL') { ws({{ m.per_exec_ws('tvm/build-wasm') }}) { init_git() + docker_init(ci_wasm) sh ( script: "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh build", label: 'Create WASM 
cmake config', @@ -529,6 +540,7 @@ stage('Build') { node('CPU-SMALL') { ws({{ m.per_exec_ws('tvm/build-i386') }}) { init_git() + docker_init(ci_i386) sh ( script: "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh build", label: 'Create i386 cmake config', @@ -546,6 +558,7 @@ stage('Build') { node('ARM') { ws({{ m.per_exec_ws('tvm/build-arm') }}) { init_git() + docker_init(ci_arm) sh ( script: "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh build", label: 'Create ARM cmake config', @@ -563,6 +576,7 @@ stage('Build') { node('CPU-SMALL') { ws({{ m.per_exec_ws('tvm/build-qemu') }}) { init_git() + docker_init(ci_qemu) sh ( script: "${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh build", label: 'Create QEMU cmake config', @@ -580,6 +594,7 @@ stage('Build') { node('CPU-SMALL') { ws({{ m.per_exec_ws('tvm/build-hexagon') }}) { init_git() + docker_init(ci_hexagon) sh ( script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_config_build_hexagon.sh build", label: 'Create Hexagon cmake config', @@ -611,6 +626,7 @@ stage('Test') { node="GPU", ws="tvm/ut-python-gpu", platform="gpu", + docker_image="ci_gpu", ) %} {% if shard_index == 1 %} {{ m.download_artifacts(tag='gpu2', filenames=tvm_multilib) }} @@ -644,6 +660,7 @@ stage('Test') { num_shards=2, ws="tvm/integration-python-cpu", platform="cpu", + docker_image="ci_cpu", ) %} {{ m.download_artifacts(tag='cpu', filenames=tvm_multilib_tsim) }} ci_setup(ci_cpu) @@ -657,6 +674,7 @@ stage('Test') { node="CPU-SMALL", ws="tvm/ut-python-cpu", platform="cpu", + docker_image="ci_cpu", ) %} {{ m.download_artifacts(tag='cpu', filenames=tvm_multilib_tsim) }} ci_setup(ci_cpu) @@ -674,6 +692,7 @@ stage('Test') { num_shards=3, ws="tvm/integration-python-i386", platform="i386", + docker_image="ci_i386", ) %} {{ m.download_artifacts(tag='i386', filenames=tvm_multilib) }} ci_setup(ci_i386) @@ -693,6 +712,7 @@ stage('Test') { ws="tvm/test-hexagon", platform="hexagon", num_shards=4, + 
docker_image="ci_hexagon", ) %} {{ m.download_artifacts(tag='hexagon', filenames=tvm_lib) }} ci_setup(ci_hexagon) @@ -713,6 +733,7 @@ stage('Test') { node="CPU-SMALL", ws="tvm/test-qemu", platform="qemu", + docker_image="ci_qemu", ) %} {{ m.download_artifacts(tag='qemu', filenames=tvm_lib, folders=microtvm_template_projects) }} add_microtvm_permissions() @@ -732,6 +753,7 @@ stage('Test') { node="ARM", ws="tvm/ut-python-arm", platform="arm", + docker_image="ci_arm", ) %} {{ m.download_artifacts(tag='arm', filenames=tvm_multilib) }} ci_setup(ci_arm) @@ -750,6 +772,7 @@ stage('Test') { num_shards=2, node="ARM", ws="tvm/ut-python-arm", platform="arm", + docker_image="ci_arm", ) %} {{ m.download_artifacts(tag='arm', filenames=tvm_multilib) }} ci_setup(ci_arm) @@ -765,6 +788,7 @@ stage('Test') { num_shards=2, ws="tvm/topi-python-gpu", platform="gpu", + docker_image="ci_gpu", ) %} {{ m.download_artifacts(tag='gpu', filenames=tvm_multilib) }} ci_setup(ci_gpu) @@ -778,6 +802,7 @@ stage('Test') { num_shards=3, ws="tvm/frontend-python-gpu", platform="gpu", + docker_image="ci_gpu", ) %} {{ m.download_artifacts(tag='gpu', filenames=tvm_multilib) }} ci_setup(ci_gpu) @@ -791,6 +816,7 @@ stage('Test') { node="CPU", ws="tvm/frontend-python-cpu", platform="cpu", + docker_image="ci_cpu", ) %} {{ m.download_artifacts(tag='cpu', filenames=tvm_multilib) }} ci_setup(ci_cpu) @@ -804,6 +830,7 @@ stage('Test') { node="ARM", ws="tvm/frontend-python-arm", platform="arm", + docker_image="ci_arm", ) %} {{ m.download_artifacts(tag='arm', filenames=tvm_multilib) }} ci_setup(ci_arm) @@ -817,6 +844,7 @@ stage('Test') { node('GPU') { ws({{ m.per_exec_ws('tvm/docs-python-gpu') }}) { init_git() + docker_init(ci_gpu) {{ m.download_artifacts(tag='gpu', filenames=tvm_multilib, folders=microtvm_template_projects) }} add_microtvm_permissions() timeout(time: 180, unit: 'MINUTES') { @@ -897,6 +925,25 @@ def deploy_docs() { } } + +def update_docker(ecr_image, hub_image) { + if 
(!ecr_image.contains("amazonaws.com")) { + sh("echo Skipping '${ecr_image}' since it doesn't look like an ECR image") + return + } + sh( + script: """ + set -eux + docker pull ${ecr_image} + docker tag \ + ${ecr_image} \ + ${hub_image} + docker push ${hub_image} + """, + label: "Update ${hub_image} on Docker Hub", + ) +} + stage('Deploy') { if (env.BRANCH_NAME == 'main' && env.DOCS_DEPLOY_ENABLED == 'yes') { node('CPU') { @@ -906,4 +953,36 @@ stage('Deploy') { } } } + // if (env.BRANCH_NAME == 'main' && rebuild_docker_images) { + if (env.BRANCH_NAME == 'PR-11329' && rebuild_docker_images && upstream_revision != null) { + node('CPU') { + ws({{ m.per_exec_ws('tvm/deploy-docker') }}) { + try { + withCredentials([string( + credentialsId: 'dockerhub-tlcpackstaging-key', + variable: 'DOCKERHUB_KEY', + )]) { + sh( + script: 'docker login -u tlcpackstaging -p ${DOCKERHUB_KEY}', + label: 'Log in to Docker Hub', + ) + } + def date_Ymd_HMS = sh( + script: 'python -c \'import datetime; print(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))\'', + label: 'Determine date', + returnStdout: true, + ).trim() + def tag = "${date_Ymd_HMS}-${upstream_revision.substring(0, 8)}" + {% for image in images %} + update_docker({{ image.name }}, "tlcpackstaging/test_{{ image.name }}:${tag}") + {% endfor %} + } finally { + sh( + script: 'docker logout', + label: 'Clean up login credentials' + ) + } + } + } + } } diff --git a/jenkins/macros.j2 b/jenkins/macros.j2 index ce29aa2d58..2a2148562d 100644 --- a/jenkins/macros.j2 +++ b/jenkins/macros.j2 @@ -19,7 +19,7 @@ "workspace/exec_${env.EXECUTOR_NUMBER}/{{ folder }}" {%- endmacro -%} -{% macro sharded_test_step(name, num_shards, node, ws, platform) %} +{% macro sharded_test_step(name, num_shards, node, ws, platform, docker_image) %} {% for shard_index in range(1, num_shards + 1) %} '{{ name }} {{ shard_index }} of {{ num_shards }}': { if (!skip_ci && is_docs_only_build != 1) { @@ -27,6 +27,7 @@ ws({{ per_exec_ws(ws) }}) { try { init_git() + 
docker_init({{ docker_image }}) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'PLATFORM={{ platform }}', @@ -47,12 +48,13 @@ {% endfor %} {% endmacro %} -{% macro sharded_lint_step(name, num_shards, node, ws) %} +{% macro sharded_lint_step(name, num_shards, node, ws, docker_image) %} {% for shard_index in range(1, num_shards + 1) %} '{{ name }} {{ shard_index }} of {{ num_shards }}': { node('{{ node }}') { ws({{ per_exec_ws(ws) }}) { init_git() + docker_init({{ docker_image }}) timeout(time: max_time, unit: 'MINUTES') { withEnv([ 'TVM_NUM_SHARDS={{ num_shards }}', @@ -67,7 +69,7 @@ {% endmacro %} -{% macro test_step(name, node, ws, platform) %} +{% macro test_step(name, node, ws, platform, docker_image) %} '{{ name }}': { if (!skip_ci && is_docs_only_build != 1) { node('{{ node }}') { @@ -75,6 +77,7 @@ timeout(time: max_time, unit: 'MINUTES') { try { init_git() + docker_init({{ docker_image }}) withEnv(['PLATFORM={{ platform }}'], { {{ caller() | indent(width=12) | trim }} }) diff --git a/tests/python/ci/test_ci.py b/tests/python/ci/test_ci.py index e197d7e48a..b412a7067a 100644 --- a/tests/python/ci/test_ci.py +++ b/tests/python/ci/test_ci.py @@ -18,8 +18,10 @@ import subprocess import sys import json +from tempfile import tempdir import textwrap import pytest +from pathlib import Path from test_utils import REPO_ROOT @@ -28,11 +30,13 @@ class TempGit: def __init__(self, cwd): self.cwd = cwd - def run(self, *args): - proc = subprocess.run(["git"] + list(args), cwd=self.cwd) + def run(self, *args, **kwargs): + proc = subprocess.run(["git"] + list(args), encoding="utf-8", cwd=self.cwd, **kwargs) if proc.returncode != 0: raise RuntimeError(f"git command failed: '{args}'") + return proc + def test_cc_reviewers(tmpdir_factory): reviewers_script = REPO_ROOT / "tests" / "scripts" / "github_cc_reviewers.py" @@ -731,5 +735,92 @@ def test_github_tag_teams(tmpdir_factory): ) [email protected]( + "changed_files,name,check,expected_code", + [ + d.values() + for d in [ + 
dict( + changed_files=[], + name="abc", + check="Image abc is not using new naming scheme", + expected_code=1, + ), + dict( + changed_files=[], name="123-123-abc", check="No extant hash found", expected_code=1 + ), + dict( + changed_files=[["test.txt"]], + name=None, + check="Did not find changes, no rebuild necessary", + expected_code=0, + ), + dict( + changed_files=[["test.txt"], ["docker/test.txt"]], + name=None, + check="Found docker changes", + expected_code=2, + ), + ] + ], +) +def test_should_rebuild_docker(tmpdir_factory, changed_files, name, check, expected_code): + tag_script = REPO_ROOT / "tests" / "scripts" / "should_rebuild_docker.py" + + git = TempGit(tmpdir_factory.mktemp("tmp_git_dir")) + git.run("init") + git.run("checkout", "-b", "main") + git.run("remote", "add", "origin", "https://github.com/apache/tvm.git") + + git_path = Path(git.cwd) + for i, commits in enumerate(changed_files): + for filename in commits: + path = git_path / filename + path.parent.mkdir(exist_ok=True, parents=True) + path.touch() + git.run("add", filename) + + git.run("commit", "-m", f"message {i}") + + if name is None: + ref = "HEAD" + if len(changed_files) > 1: + ref = f"HEAD~{len(changed_files) - 1}" + proc = git.run("rev-parse", ref, stdout=subprocess.PIPE) + last_hash = proc.stdout.strip() + name = f"123-123-{last_hash}" + + docker_data = { + "repositories/tlcpack": { + "results": [ + { + "name": "ci-something", + }, + { + "name": "something-else", + }, + ], + }, + "repositories/tlcpack/ci-something/tags": { + "results": [{"name": name}, {"name": name + "old"}], + }, + } + + proc = subprocess.run( + [ + str(tag_script), + "--testing-docker-data", + json.dumps(docker_data), + ], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding="utf-8", + cwd=git.cwd, + ) + + assert_in(check, proc.stdout) + assert proc.returncode == expected_code + + if __name__ == "__main__": sys.exit(pytest.main([__file__] + sys.argv[1:])) diff --git a/tests/scripts/cmd_utils.py 
b/tests/scripts/cmd_utils.py index 272086796e..771c3ee52d 100644 --- a/tests/scripts/cmd_utils.py +++ b/tests/scripts/cmd_utils.py @@ -44,18 +44,21 @@ def init_log(): class Sh: - def __init__(self, env=None): + def __init__(self, env=None, cwd=None): self.env = os.environ.copy() if env is not None: self.env.update(env) + self.cwd = cwd def run(self, cmd: str, **kwargs): logging.info(f"+ {cmd}") - if "check" not in kwargs: - kwargs["check"] = True - if "shell" not in kwargs: - kwargs["shell"] = True - if "env" not in kwargs: - kwargs["env"] = self.env - - subprocess.run(cmd, **kwargs) + defaults = { + "check": True, + "shell": True, + "env": self.env, + "encoding": "utf-8", + "cwd": self.cwd, + } + defaults.update(kwargs) + + return subprocess.run(cmd, **defaults) diff --git a/tests/scripts/git_utils.py b/tests/scripts/git_utils.py index bc00bdf127..1fceb908ed 100644 --- a/tests/scripts/git_utils.py +++ b/tests/scripts/git_utils.py @@ -19,6 +19,7 @@ import json import subprocess import re +import logging from urllib import request from typing import Dict, Tuple, Any, Optional, List diff --git a/tests/scripts/http_utils.py b/tests/scripts/http_utils.py new file mode 100644 index 0000000000..c14259479d --- /dev/null +++ b/tests/scripts/http_utils.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +import json +import logging +from urllib import request +from typing import Dict, Any, Optional + + +def get(url: str, headers: Optional[Dict[str, str]] = None) -> Dict[str, Any]: + logging.info(f"Requesting GET to {url}") + if headers is None: + headers = {} + req = request.Request(url, headers=headers) + with request.urlopen(req) as response: + response_headers = {k: v for k, v in response.getheaders()} + response = json.loads(response.read()) + + return response, response_headers diff --git a/tests/scripts/should_rebuild_docker.py b/tests/scripts/should_rebuild_docker.py new file mode 100755 index 0000000000..dc12c38de8 --- /dev/null +++ b/tests/scripts/should_rebuild_docker.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python3 +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
import argparse
import datetime
import json
import logging
import subprocess

from typing import Any, Dict, List, Tuple

from http_utils import get
from cmd_utils import Sh, init_log


DOCKER_API_BASE = "https://hub.docker.com/v2/"
PAGE_SIZE = 25
# When set (via --testing-docker-data), docker_api() serves responses from
# this dict instead of hitting the real Docker Hub API.
TEST_DATA = None


def docker_api(url: str) -> Dict[str, Any]:
    """
    Run a paginated fetch from the public Docker Hub API

    Returns the parsed JSON body and logs the rate-limit headers so CI logs
    show how close we are to the API cap.

    Raises RuntimeError if the response contains no 'results' key.
    """
    if TEST_DATA is not None:
        # Test mode: responses are keyed by the un-paginated URL
        return TEST_DATA[url]
    pagination = f"?page_size={PAGE_SIZE}&page=1"
    url = DOCKER_API_BASE + url + pagination
    r, headers = get(url)
    reset = headers.get("x-ratelimit-reset")
    if reset is not None:
        reset = datetime.datetime.fromtimestamp(int(reset))
        reset = reset.isoformat()
    logging.info(
        f"Docker API Rate Limit: {headers.get('x-ratelimit-remaining')} / "
        f"{headers.get('x-ratelimit-limit')} (reset at {reset})"
    )
    if "results" not in r:
        raise RuntimeError(f"Error fetching data, no results found in: {r}")
    return r


def any_docker_changes_since(hash: str) -> Tuple[bool, str]:
    """
    Check the docker/ directory, return (changed, diff) where 'changed' is
    True if there have been any code changes since the specified hash and
    'diff' is the raw 'git diff' output.
    """
    # NOTE: 'hash' shadows the builtin; kept for interface compatibility.
    sh = Sh()
    cmd = f"git diff {hash} -- docker/"
    proc = sh.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout = proc.stdout.strip()
    return stdout != "", stdout


def does_commit_exist(hash: str) -> bool:
    """
    Returns True if the hash exists in the repo

    Raises RuntimeError if 'git rev-parse' fails for any reason other than
    the revision being unknown.
    """
    sh = Sh()
    cmd = f"git rev-parse -q {hash}"
    proc = sh.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=False)
    # Use logging (not a bare print) to match the rest of the script
    logging.info(proc.stdout)
    if proc.returncode == 0:
        return True

    if "unknown revision or path not in the working tree" in proc.stdout:
        return False

    raise RuntimeError(f"Unexpected failure when running: {cmd}")


def find_hash_for_tag(tag: Dict[str, Any]) -> str:
    """
    Split the hash off of a name like <date>-<time>-<hash>

    Raises RuntimeError if the tag does not follow that naming scheme.
    """
    name = tag["name"]
    name_parts = name.split("-")
    if len(name_parts) != 3:
        raise RuntimeError(f"Image {name} is not using new naming scheme")
    shorthash = name_parts[2]
    return shorthash


def find_commit_in_repo(tags: List[Dict[str, Any]]):
    """
    Look through all the docker tags, find the most recent one which references
    a commit that is present in the repo

    Raises RuntimeError if no tag references an extant commit.
    """
    for tag in tags["results"]:
        shorthash = find_hash_for_tag(tag)
        if does_commit_exist(shorthash):
            return shorthash, tag
        # Only log non-existence after the check has actually failed
        logging.info(f"Hash '{shorthash}' does not exist in repo")

    raise RuntimeError(f"No extant hash found in tags:\n{tags}")


def main():
    """Exit 2 if docker/ changed since the last published image, else 0."""
    # Fetch all tlcpack images
    images = docker_api("repositories/tlcpack")

    # Ignore all non-ci images
    relevant_images = [image for image in images["results"] if image["name"].startswith("ci-")]
    image_names = [image["name"] for image in relevant_images]
    logging.info(f"Found {len(relevant_images)} images to check: {', '.join(image_names)}")

    for image in relevant_images:
        # Check the tags for the image
        tags = docker_api(f"repositories/tlcpack/{image['name']}/tags")

        # Find the hash of the most recent tag
        shorthash, tag = find_commit_in_repo(tags)
        name = tag["name"]
        logging.info(f"Looking for docker/ changes since {shorthash}")

        any_docker_changes, diff = any_docker_changes_since(shorthash)
        if any_docker_changes:
            logging.info(f"Found docker changes from {shorthash} when checking {name}")
            logging.info(diff)
            exit(2)

    logging.info("Did not find changes, no rebuild necessary")
    exit(0)


if __name__ == "__main__":
    init_log()
    parser = argparse.ArgumentParser(
        # Exit code 2 (not 1) signals "rebuild needed"; 1 indicates an error
        description="Exits 0 if Docker images don't need to be rebuilt, 2 if they do"
    )
    parser.add_argument(
        "--testing-docker-data",
        help="(testing only) JSON data to mock response from Docker Hub API",
    )
    args = parser.parse_args()

    if args.testing_docker_data is not None:
        TEST_DATA = json.loads(args.testing_docker_data)

    main()
