This is an automated email from the ASF dual-hosted git repository.

areusch pushed a commit to branch areusch/freeze-dependencies
in repository https://gitbox.apache.org/repos/asf/tvm.git

commit 03e1f202bf061b4191b9d3c17e9deb53d5d10074
Author: Andrew Reusch <[email protected]>
AuthorDate: Thu May 19 15:26:47 2022 -0700

    Patch publish Jenkinsfiles PR.
---
 Jenkinsfile                            | 511 ++++++++++++++++++++-------------
 jenkins/Jenkinsfile.j2                 | 397 +++++++++++++++----------
 jenkins/macros.j2                      |   9 +-
 tests/python/ci/test_ci.py             |  95 +++++-
 tests/scripts/cmd_utils.py             |  21 +-
 tests/scripts/git_utils.py             |   1 +
 tests/scripts/http_utils.py            |  34 +++
 tests/scripts/should_rebuild_docker.py | 154 ++++++++++
 8 files changed, 846 insertions(+), 376 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 424f97494d..f0665bc4c1 100755
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -45,7 +45,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-05-17T17:26:21.660243
+// Generated at 2022-05-19T12:14:11.652883
 
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 // NOTE: these lines are scanned by docker/dev_common.sh. Please update the 
regex as needed. -->
@@ -193,63 +193,22 @@ if 
(currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) {
 
 cancel_previous_build()
 
-def lint() {
-stage('Lint') {
-  parallel(
+def run_lint() {
+  stage('Lint') {
+    parallel(
   'Lint 1 of 2': {
     node('CPU-SMALL') {
       ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/lint") {
         init_git()
+        docker_init(ci_lint)
         timeout(time: max_time, unit: 'MINUTES') {
           withEnv([
             'TVM_NUM_SHARDS=2',
             'TVM_SHARD_INDEX=0'], {
-            ci_arm = params.ci_arm_param ?: ci_arm
-            ci_cpu = params.ci_cpu_param ?: ci_cpu
-            ci_gpu = params.ci_gpu_param ?: ci_gpu
-            ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
-            ci_i386 = params.ci_i386_param ?: ci_i386
-            ci_lint = params.ci_lint_param ?: ci_lint
-            ci_qemu = params.ci_qemu_param ?: ci_qemu
-            ci_wasm = params.ci_wasm_param ?: ci_wasm
-
-            sh (script: """
-              echo "Docker images being used in this build:"
-              echo " ci_arm = ${ci_arm}"
-              echo " ci_cpu = ${ci_cpu}"
-              echo " ci_gpu = ${ci_gpu}"
-              echo " ci_hexagon = ${ci_hexagon}"
-              echo " ci_i386 = ${ci_i386}"
-              echo " ci_lint = ${ci_lint}"
-              echo " ci_qemu = ${ci_qemu}"
-              echo " ci_wasm = ${ci_wasm}"
-            """, label: 'Docker image names')
-
-            is_docs_only_build = sh (
-              returnStatus: true,
-              script: './tests/scripts/git_change_docs.sh',
-              label: 'Check for docs only changes',
-            )
-            skip_ci = should_skip_ci(env.CHANGE_ID)
-            skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID)
-            rebuild_docker_images = sh (
-              returnStatus: true,
-              script: './tests/scripts/git_change_docker.sh',
-              label: 'Check for any docker changes',
-            )
-            if (skip_ci) {
-              // Don't rebuild when skipping CI
-              rebuild_docker_images = false
-            }
-            if (rebuild_docker_images) {
-              // Exit before linting so we can use the newly created Docker 
images
-              // to run the lint
-              return
-            }
             sh (
-              script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh",
-              label: 'Run lint',
-            )
+                script: "${docker_run} ${ci_lint} 
./tests/scripts/task_lint.sh",
+                label: 'Run lint',
+              )
           })
         }
       }
@@ -259,80 +218,120 @@ stage('Lint') {
     node('CPU-SMALL') {
       ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/lint") {
         init_git()
+        docker_init(ci_lint)
         timeout(time: max_time, unit: 'MINUTES') {
           withEnv([
             'TVM_NUM_SHARDS=2',
             'TVM_SHARD_INDEX=1'], {
-            ci_arm = params.ci_arm_param ?: ci_arm
-            ci_cpu = params.ci_cpu_param ?: ci_cpu
-            ci_gpu = params.ci_gpu_param ?: ci_gpu
-            ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
-            ci_i386 = params.ci_i386_param ?: ci_i386
-            ci_lint = params.ci_lint_param ?: ci_lint
-            ci_qemu = params.ci_qemu_param ?: ci_qemu
-            ci_wasm = params.ci_wasm_param ?: ci_wasm
-
-            sh (script: """
-              echo "Docker images being used in this build:"
-              echo " ci_arm = ${ci_arm}"
-              echo " ci_cpu = ${ci_cpu}"
-              echo " ci_gpu = ${ci_gpu}"
-              echo " ci_hexagon = ${ci_hexagon}"
-              echo " ci_i386 = ${ci_i386}"
-              echo " ci_lint = ${ci_lint}"
-              echo " ci_qemu = ${ci_qemu}"
-              echo " ci_wasm = ${ci_wasm}"
-            """, label: 'Docker image names')
-
-            is_docs_only_build = sh (
-              returnStatus: true,
-              script: './tests/scripts/git_change_docs.sh',
-              label: 'Check for docs only changes',
-            )
-            skip_ci = should_skip_ci(env.CHANGE_ID)
-            skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID)
-            rebuild_docker_images = sh (
-              returnStatus: true,
-              script: './tests/scripts/git_change_docker.sh',
-              label: 'Check for any docker changes',
-            )
-            if (skip_ci) {
-              // Don't rebuild when skipping CI
-              rebuild_docker_images = false
-            }
-            if (rebuild_docker_images) {
-              // Exit before linting so we can use the newly created Docker 
images
-              // to run the lint
-              return
-            }
             sh (
-              script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh",
-              label: 'Run lint',
-            )
+                script: "${docker_run} ${ci_lint} 
./tests/scripts/task_lint.sh",
+                label: 'Run lint',
+              )
           })
         }
       }
     }
   },
-  )
+    )
+  }
 }
+
+def prepare() {
+  stage('Prepare') {
+    node('CPU-SMALL') {
+      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
+        init_git()
+        ci_arm = params.ci_arm_param ?: ci_arm
+        ci_cpu = params.ci_cpu_param ?: ci_cpu
+        ci_gpu = params.ci_gpu_param ?: ci_gpu
+        ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
+        ci_i386 = params.ci_i386_param ?: ci_i386
+        ci_lint = params.ci_lint_param ?: ci_lint
+        ci_qemu = params.ci_qemu_param ?: ci_qemu
+        ci_wasm = params.ci_wasm_param ?: ci_wasm
+
+        sh (script: """
+          echo "Docker images being used in this build:"
+          echo " ci_arm = ${ci_arm}"
+          echo " ci_cpu = ${ci_cpu}"
+          echo " ci_gpu = ${ci_gpu}"
+          echo " ci_hexagon = ${ci_hexagon}"
+          echo " ci_i386 = ${ci_i386}"
+          echo " ci_lint = ${ci_lint}"
+          echo " ci_qemu = ${ci_qemu}"
+          echo " ci_wasm = ${ci_wasm}"
+        """, label: 'Docker image names')
+
+        is_docs_only_build = sh (
+          returnStatus: true,
+          script: './tests/scripts/git_change_docs.sh',
+          label: 'Check for docs only changes',
+        )
+        skip_ci = should_skip_ci(env.CHANGE_ID)
+        skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID)
+        rebuild_docker_images = sh (
+          returnStatus: true,
+          script: './tests/scripts/should_rebuild_docker.py',
+          label: 'Check for any docker changes',
+        )
+        if (skip_ci) {
+          // Don't rebuild when skipping CI
+          rebuild_docker_images = false
+        }
+      }
+    }
+  }
 }
 
 // [note: method size]
 // This has to be extracted into a method due to JVM limitations on the size of
 // a method (so the code can't all be inlined)
-lint()
+prepare()
 
-def build_image(image_name) {
-  hash = sh(
+def ecr_push(full_name) {
+  aws_account_id = sh(
     returnStdout: true,
-    script: 'git log -1 --format=\'%h\''
+    script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"',
+    label: 'Get AWS ID'
   ).trim()
-  def full_name = 
"${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}"
-  sh(
-    script: "${docker_build} ${image_name} --spec ${full_name}",
-    label: 'Build docker image'
-  )
+
+  def ecr_name = 
"${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com/${full_name}"
+  try {
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION=us-west-2',
+      "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
+      sh(
+        script: '''
+          set -eux
+          aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker 
login --username AWS --password-stdin $AWS_ECR_REPO
+        ''',
+        label: 'Log in to ECR'
+      )
+      sh(
+        script: """
+          set -x
+          docker tag ${full_name} \$AWS_ECR_REPO/${full_name}
+          docker push \$AWS_ECR_REPO/${full_name}
+        """,
+        label: 'Upload image to ECR'
+      )
+    }
+  } finally {
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION=us-west-2',
+      "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
+      sh(
+        script: 'docker logout $AWS_ECR_REPO',
+        label: 'Clean up login credentials'
+      )
+    }
+  }
+  return ecr_name
+}
+
+def ecr_pull(full_name) {
   aws_account_id = sh(
     returnStdout: true,
     script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"',
@@ -340,125 +339,127 @@ def build_image(image_name) {
   ).trim()
 
   try {
-    // Use a credential so Jenkins knows to scrub the AWS account ID which is 
nice
-    // (but so we don't have to rely it being hardcoded in Jenkins)
-    withCredentials([string(
-      credentialsId: 'aws-account-id',
-      variable: '_ACCOUNT_ID_DO_NOT_USE',
-      )]) {
-      withEnv([
-        "AWS_ACCOUNT_ID=${aws_account_id}",
-        'AWS_DEFAULT_REGION=us-west-2']) {
-        sh(
-          script: '''
-            set -x
-            aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker 
login --username AWS --password-stdin 
$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com
-          ''',
-          label: 'Log in to ECR'
-        )
-        sh(
-          script: """
-            set -x
-            docker tag ${full_name} 
\$AWS_ACCOUNT_ID.dkr.ecr.\$AWS_DEFAULT_REGION.amazonaws.com/${full_name}
-            docker push 
\$AWS_ACCOUNT_ID.dkr.ecr.\$AWS_DEFAULT_REGION.amazonaws.com/${full_name}
-          """,
-          label: 'Upload image to ECR'
-        )
-      }
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION=us-west-2',
+      "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
+      sh(
+        script: '''
+          set -eux
+          aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker 
login --username AWS --password-stdin $AWS_ECR_REPO
+        ''',
+        label: 'Log in to ECR'
+      )
+      sh(
+        script: """
+          set -eux
+          docker pull ${full_name}
+        """,
+        label: 'Pull image from ECR'
+      )
     }
   } finally {
-    sh(
-      script: 'rm -f ~/.docker/config.json',
-      label: 'Clean up login credentials'
-    )
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION=us-west-2',
+      "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
+      sh(
+        script: 'docker logout $AWS_ECR_REPO',
+        label: 'Clean up login credentials'
+      )
+    }
   }
+}
+
+
+def build_image(image_name) {
+  hash = sh(
+    returnStdout: true,
+    script: 'git log -1 --format=\'%h\''
+  ).trim()
+  def full_name = 
"${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}"
   sh(
-    script: "docker rmi ${full_name}",
-    label: 'Remove docker image'
+    script: "${docker_build} ${image_name} --spec ${full_name}",
+    label: 'Build docker image'
   )
+  return ecr_push(full_name)
 }
 
 if (rebuild_docker_images) {
   stage('Docker Image Build') {
     // TODO in a follow up PR: Find ecr tag and use in subsequent builds
-    parallel 'ci-lint': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_lint')
+    parallel(
+      'ci_arm': {
+        node('ARM') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            ci_arm = build_image('ci_arm')
+          }
         }
-      }
-    }, 'ci-cpu': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_cpu')
+      },
+      'ci_cpu': {
+        node('CPU') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            ci_cpu = build_image('ci_cpu')
+          }
         }
-      }
-    }, 'ci-gpu': {
-      node('GPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_gpu')
+      },
+      'ci_gpu': {
+        node('CPU') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            ci_gpu = build_image('ci_gpu')
+          }
         }
-      }
-    }, 'ci-qemu': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_qemu')
+      },
+      'ci_hexagon': {
+        node('CPU') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            ci_hexagon = build_image('ci_hexagon')
+          }
         }
-      }
-    }, 'ci-i386': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_i386')
+      },
+      'ci_i386': {
+        node('CPU') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            ci_i386 = build_image('ci_i386')
+          }
         }
-      }
-    }, 'ci-arm': {
-      node('ARM') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_arm')
+      },
+      'ci_lint': {
+        node('CPU') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            ci_lint = build_image('ci_lint')
+          }
         }
-      }
-    }, 'ci-wasm': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_wasm')
+      },
+      'ci_qemu': {
+        node('CPU') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            ci_qemu = build_image('ci_qemu')
+          }
         }
-      }
-    }, 'ci-hexagon': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_hexagon')
+      },
+      'ci_wasm': {
+        node('CPU') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            ci_wasm = build_image('ci_wasm')
+          }
         }
-      }
-    }
+      },
+    )
   }
-  // // TODO: Once we are able to use the built images, enable this step
-  // // If the docker images changed, we need to run the image build before 
the lint
-  // // can run since it requires a base docker image. Most of the time the 
images
-  // // aren't build though so it's faster to use the same node that checks for
-  // // docker changes to run the lint in the usual case.
-  // stage('Sanity Check (re-run)') {
-  //   timeout(time: max_time, unit: 'MINUTES') {
-  //     node('CPU') {
-  //       ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/sanity") {
-  //         init_git()
-  //         sh (
-  //           script: "${docker_run} ${ci_lint}  
./tests/scripts/task_lint.sh",
-  //           label: 'Run lint',
-  //         )
-  //       }
-  //     }
-  //   }
-  // }
 }
 
+// Run the lint with the new Docker image before continuing to builds
+run_lint()
+
 // Run make. First try to do an incremental make from a previous workspace in 
hope to
 // accelerate the compilation. If something is wrong, clean the workspace and 
then
 // build from scratch.
@@ -565,16 +566,31 @@ def cpp_unittest(image) {
   )
 }
 
+def docker_init(image) {
+  if (image.contains("amazonaws.com")) {
+    // If this string is in the image name it's from ECR and needs to be pulled
+    // with the right credentials
+    ecr_pull(image)
+  } else {
+    sh(
+      script: "docker pull ${image}",
+      label: 'Pull docker image',
+    )
+  }
+}
+
 def build() {
 stage('Build') {
   environment {
     SKIP_SLOW_TESTS = "${skip_slow_tests}"
   }
-  parallel 'BUILD: GPU': {
+  parallel(
+    'BUILD: GPU': {
     if (!skip_ci) {
       node('CPU-SMALL') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-gpu") {
           init_git()
+          docker_init(ci_gpu)
           sh "${docker_run} --no-gpu ${ci_gpu} 
./tests/scripts/task_config_build_gpu.sh build"
           make("${ci_gpu} --no-gpu", 'build', '-j2')
           pack_lib('gpu', tvm_multilib)
@@ -592,6 +608,7 @@ stage('Build') {
       node('CPU-SMALL') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-cpu") {
           init_git()
+          docker_init(ci_cpu)
           sh (
             script: "${docker_run} ${ci_cpu} 
./tests/scripts/task_config_build_cpu.sh build",
             label: 'Create CPU cmake config',
@@ -615,6 +632,7 @@ stage('Build') {
       node('CPU-SMALL') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-wasm") {
           init_git()
+          docker_init(ci_wasm)
           sh (
             script: "${docker_run} ${ci_wasm} 
./tests/scripts/task_config_build_wasm.sh build",
             label: 'Create WASM cmake config',
@@ -639,6 +657,7 @@ stage('Build') {
       node('CPU-SMALL') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-i386") {
           init_git()
+          docker_init(ci_i386)
           sh (
             script: "${docker_run} ${ci_i386} 
./tests/scripts/task_config_build_i386.sh build",
             label: 'Create i386 cmake config',
@@ -656,6 +675,7 @@ stage('Build') {
       node('ARM') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-arm") {
           init_git()
+          docker_init(ci_arm)
           sh (
             script: "${docker_run} ${ci_arm} 
./tests/scripts/task_config_build_arm.sh build",
             label: 'Create ARM cmake config',
@@ -673,6 +693,7 @@ stage('Build') {
       node('CPU-SMALL') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-qemu") {
           init_git()
+          docker_init(ci_qemu)
           sh (
             script: "${docker_run} ${ci_qemu} 
./tests/scripts/task_config_build_qemu.sh build",
             label: 'Create QEMU cmake config',
@@ -691,6 +712,7 @@ stage('Build') {
       node('CPU-SMALL') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-hexagon") {
           init_git()
+          docker_init(ci_hexagon)
           sh (
             script: "${docker_run} ${ci_hexagon} 
./tests/scripts/task_config_build_hexagon.sh build",
             label: 'Create Hexagon cmake config',
@@ -702,7 +724,8 @@ stage('Build') {
      } else {
       Utils.markStageSkippedForConditional('BUILD: Hexagon')
     }
-  }
+  },
+  )
 }
 }
 
@@ -721,6 +744,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") {
           try {
             init_git()
+            docker_init(ci_gpu)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=gpu',
@@ -757,6 +781,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") {
           try {
             init_git()
+            docker_init(ci_gpu)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=gpu',
@@ -793,6 +818,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") 
{
           try {
             init_git()
+            docker_init(ci_cpu)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=cpu',
@@ -821,6 +847,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-cpu") 
{
           try {
             init_git()
+            docker_init(ci_cpu)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=cpu',
@@ -850,6 +877,7 @@ stage('Test') {
           timeout(time: max_time, unit: 'MINUTES') {
             try {
               init_git()
+              docker_init(ci_cpu)
               withEnv(['PLATFORM=cpu'], {
                 unpack_lib('cpu', tvm_multilib_tsim)
                 ci_setup(ci_cpu)
@@ -877,6 +905,7 @@ stage('Test') {
         
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") {
           try {
             init_git()
+            docker_init(ci_i386)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=i386',
@@ -908,6 +937,7 @@ stage('Test') {
         
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") {
           try {
             init_git()
+            docker_init(ci_i386)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=i386',
@@ -938,6 +968,7 @@ stage('Test') {
         
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") {
           try {
             init_git()
+            docker_init(ci_i386)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=i386',
@@ -968,6 +999,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
           try {
             init_git()
+            docker_init(ci_hexagon)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=hexagon',
@@ -1001,6 +1033,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
           try {
             init_git()
+            docker_init(ci_hexagon)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=hexagon',
@@ -1033,6 +1066,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
           try {
             init_git()
+            docker_init(ci_hexagon)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=hexagon',
@@ -1065,6 +1099,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/test-hexagon") {
           try {
             init_git()
+            docker_init(ci_hexagon)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=hexagon',
@@ -1098,6 +1133,7 @@ stage('Test') {
           timeout(time: max_time, unit: 'MINUTES') {
             try {
               init_git()
+              docker_init(ci_qemu)
               withEnv(['PLATFORM=qemu'], {
                 unpack_lib('qemu', tvm_lib)
                 unpack_microtvm_template_projects('qemu')
@@ -1129,6 +1165,7 @@ stage('Test') {
           timeout(time: max_time, unit: 'MINUTES') {
             try {
               init_git()
+              docker_init(ci_arm)
               withEnv(['PLATFORM=arm'], {
                 unpack_lib('arm', tvm_multilib)
                 ci_setup(ci_arm)
@@ -1158,6 +1195,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") {
           try {
             init_git()
+            docker_init(ci_arm)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=arm',
@@ -1187,6 +1225,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") {
           try {
             init_git()
+            docker_init(ci_arm)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=arm',
@@ -1216,6 +1255,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") {
           try {
             init_git()
+            docker_init(ci_gpu)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=gpu',
@@ -1244,6 +1284,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") {
           try {
             init_git()
+            docker_init(ci_gpu)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=gpu',
@@ -1272,6 +1313,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") {
           try {
             init_git()
+            docker_init(ci_gpu)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=gpu',
@@ -1300,6 +1342,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") {
           try {
             init_git()
+            docker_init(ci_gpu)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=gpu',
@@ -1328,6 +1371,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") {
           try {
             init_git()
+            docker_init(ci_gpu)
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM=gpu',
@@ -1357,6 +1401,7 @@ stage('Test') {
           timeout(time: max_time, unit: 'MINUTES') {
             try {
               init_git()
+              docker_init(ci_cpu)
               withEnv(['PLATFORM=cpu'], {
                 unpack_lib('cpu', tvm_multilib)
                 ci_setup(ci_cpu)
@@ -1382,6 +1427,7 @@ stage('Test') {
           timeout(time: max_time, unit: 'MINUTES') {
             try {
               init_git()
+              docker_init(ci_arm)
               withEnv(['PLATFORM=arm'], {
                 unpack_lib('arm', tvm_multilib)
                 ci_setup(ci_arm)
@@ -1405,6 +1451,7 @@ stage('Test') {
       node('GPU') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/docs-python-gpu") {
           init_git()
+          docker_init(ci_gpu)
           unpack_lib('gpu', tvm_multilib)
           unpack_microtvm_template_projects('gpu')
           timeout(time: 180, unit: 'MINUTES') {
@@ -1485,6 +1532,25 @@ def deploy_docs() {
   }
 }
 
+
+def update_docker(ecr_image, hub_image) {
+  if (!ecr_image.contains("amazonaws.com")) {
+    sh("echo Skipping '${ecr_image}' since it doesn't look like an ECR image")
+    return
+  }
+  sh(
+    script: """
+    set -eux
+    docker pull ${ecr_image}
+    docker tag \
+      ${ecr_image} \
+      ${hub_image}
+    docker push ${hub_image}
+    """,
+    label: "Update ${hub_image} on Docker Hub",
+  )
+}
+
 stage('Deploy') {
   if (env.BRANCH_NAME == 'main' && env.DOCS_DEPLOY_ENABLED == 'yes') {
     node('CPU') {
@@ -1494,4 +1560,41 @@ stage('Deploy') {
       }
     }
   }
+  // if (env.BRANCH_NAME == 'main' && rebuild_docker_images) {
+  if (env.BRANCH_NAME == 'PR-11329' && rebuild_docker_images && 
upstream_revision != null) {
+    node('CPU') {
+      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/deploy-docker") {
+        try {
+          withCredentials([string(
+            credentialsId: 'dockerhub-tlcpackstaging-key',
+            variable: 'DOCKERHUB_KEY',
+          )]) {
+            sh(
+              script: 'docker login -u tlcpackstaging -p ${DOCKERHUB_KEY}',
+              label: 'Log in to Docker Hub',
+            )
+          }
+          def date_Ymd_HMS = sh(
+            script: 'python -c \'import datetime; 
print(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))\'',
+            label: 'Determine date',
+            returnStdout: true,
+          ).trim()
+          def tag = "${date_Ymd_HMS}-${upstream_revision.substring(0, 8)}"
+          update_docker(ci_arm, "tlcpackstaging/test_ci_arm:${tag}")
+          update_docker(ci_cpu, "tlcpackstaging/test_ci_cpu:${tag}")
+          update_docker(ci_gpu, "tlcpackstaging/test_ci_gpu:${tag}")
+          update_docker(ci_hexagon, "tlcpackstaging/test_ci_hexagon:${tag}")
+          update_docker(ci_i386, "tlcpackstaging/test_ci_i386:${tag}")
+          update_docker(ci_lint, "tlcpackstaging/test_ci_lint:${tag}")
+          update_docker(ci_qemu, "tlcpackstaging/test_ci_qemu:${tag}")
+          update_docker(ci_wasm, "tlcpackstaging/test_ci_wasm:${tag}")
+        } finally {
+          sh(
+            script: 'docker logout',
+            label: 'Clean up login credentials'
+          )
+        }
+      }
+    }
+  }
 }
diff --git a/jenkins/Jenkinsfile.j2 b/jenkins/Jenkinsfile.j2
index f250ff12fe..ec1aecfdd0 100644
--- a/jenkins/Jenkinsfile.j2
+++ b/jenkins/Jenkinsfile.j2
@@ -82,6 +82,8 @@ docker_build = 'docker/build.sh'
 // timeout in minutes
 max_time = 180
 rebuild_docker_images = false
+{% set aws_default_region = "us-west-2" %}
+{% set aws_ecr_url = "dkr.ecr." + aws_default_region + ".amazonaws.com" %}
 
 def per_exec_ws(folder) {
   return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder
@@ -190,66 +192,111 @@ if 
(currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) {
 
 cancel_previous_build()
 
-def lint() {
-stage('Lint') {
-  parallel(
-    {% call m.sharded_lint_step(name='Lint', num_shards=2, node='CPU-SMALL', 
ws='tvm/lint') %}
-      {% for image in images %}
-      {{ image.name }} = params.{{ image.name }}_param ?: {{ image.name }}
-      {% endfor %}
+def run_lint() {
+  stage('Lint') {
+    parallel(
+      {% call m.sharded_lint_step(
+        name='Lint',
+        num_shards=2,
+        node='CPU-SMALL',
+        ws='tvm/lint',
+        docker_image="ci_lint")
+      %}
+        sh (
+          script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh",
+          label: 'Run lint',
+        )
+      {% endcall %}
+    )
+  }
+}
 
-      sh (script: """
-        echo "Docker images being used in this build:"
+def prepare() {
+  stage('Prepare') {
+    node('CPU-SMALL') {
+      ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
+        init_git()
         {% for image in images %}
-        echo " {{ image.name }} = ${ {{- image.name -}} }"
+        {{ image.name }} = params.{{ image.name }}_param ?: {{ image.name }}
         {% endfor %}
-      """, label: 'Docker image names')
 
-      is_docs_only_build = sh (
-        returnStatus: true,
-        script: './tests/scripts/git_change_docs.sh',
-        label: 'Check for docs only changes',
-      )
-      skip_ci = should_skip_ci(env.CHANGE_ID)
-      skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID)
-      rebuild_docker_images = sh (
-        returnStatus: true,
-        script: './tests/scripts/git_change_docker.sh',
-        label: 'Check for any docker changes',
-      )
-      if (skip_ci) {
-        // Don't rebuild when skipping CI
-        rebuild_docker_images = false
-      }
-      if (rebuild_docker_images) {
-        // Exit before linting so we can use the newly created Docker images
-        // to run the lint
-        return
+        sh (script: """
+          echo "Docker images being used in this build:"
+          {% for image in images %}
+          echo " {{ image.name }} = ${ {{- image.name -}} }"
+          {% endfor %}
+        """, label: 'Docker image names')
+
+        is_docs_only_build = sh (
+          returnStatus: true,
+          script: './tests/scripts/git_change_docs.sh',
+          label: 'Check for docs only changes',
+        )
+        skip_ci = should_skip_ci(env.CHANGE_ID)
+        skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID)
+        rebuild_docker_images = sh (
+          returnStatus: true,
+          script: './tests/scripts/should_rebuild_docker.py',
+          label: 'Check for any docker changes',
+        )
+        if (skip_ci) {
+          // Don't rebuild when skipping CI
+          rebuild_docker_images = false
+        }
       }
-      sh (
-        script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh",
-        label: 'Run lint',
-      )
-    {% endcall %}
-  )
-}
+    }
+  }
 }
 
 // [note: method size]
 // This has to be extracted into a method due to JVM limitations on the size of
 // a method (so the code can't all be inlined)
-lint()
+prepare()
 
-def build_image(image_name) {
-  hash = sh(
+def ecr_push(full_name) {
+  aws_account_id = sh(
     returnStdout: true,
-    script: 'git log -1 --format=\'%h\''
+    script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"',
+    label: 'Get AWS ID'
   ).trim()
-  def full_name = 
"${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}"
-  sh(
-    script: "${docker_build} ${image_name} --spec ${full_name}",
-    label: 'Build docker image'
-  )
+
+  def ecr_name = "${aws_account_id}.{{ aws_ecr_url }}/${full_name}"
+  try {
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION={{ aws_default_region }}',
+      "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) {
+      sh(
+        script: '''
+          set -eux
+          aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker 
login --username AWS --password-stdin $AWS_ECR_REPO
+        ''',
+        label: 'Log in to ECR'
+      )
+      sh(
+        script: """
+          set -x
+          docker tag ${full_name} \$AWS_ECR_REPO/${full_name}
+          docker push \$AWS_ECR_REPO/${full_name}
+        """,
+        label: 'Upload image to ECR'
+      )
+    }
+  } finally {
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION={{ aws_default_region }}',
+      "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) {
+      sh(
+        script: 'docker logout $AWS_ECR_REPO',
+        label: 'Clean up login credentials'
+      )
+    }
+  }
+  return ecr_name
+}
+
+def ecr_pull(full_name) {
   aws_account_id = sh(
     returnStdout: true,
     script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"',
@@ -257,125 +304,73 @@ def build_image(image_name) {
   ).trim()
 
   try {
-    // Use a credential so Jenkins knows to scrub the AWS account ID which is 
nice
-    // (but so we don't have to rely it being hardcoded in Jenkins)
-    withCredentials([string(
-      credentialsId: 'aws-account-id',
-      variable: '_ACCOUNT_ID_DO_NOT_USE',
-      )]) {
-      withEnv([
-        "AWS_ACCOUNT_ID=${aws_account_id}",
-        'AWS_DEFAULT_REGION=us-west-2']) {
-        sh(
-          script: '''
-            set -x
-            aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker 
login --username AWS --password-stdin 
$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com
-          ''',
-          label: 'Log in to ECR'
-        )
-        sh(
-          script: """
-            set -x
-            docker tag ${full_name} 
\$AWS_ACCOUNT_ID.dkr.ecr.\$AWS_DEFAULT_REGION.amazonaws.com/${full_name}
-            docker push 
\$AWS_ACCOUNT_ID.dkr.ecr.\$AWS_DEFAULT_REGION.amazonaws.com/${full_name}
-          """,
-          label: 'Upload image to ECR'
-        )
-      }
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION={{ aws_default_region }}',
+      "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) {
+      sh(
+        script: '''
+          set -eux
+          aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker 
login --username AWS --password-stdin $AWS_ECR_REPO
+        ''',
+        label: 'Log in to ECR'
+      )
+      sh(
+        script: """
+          set -eux
+          docker pull ${full_name}
+        """,
+        label: 'Pull image from ECR'
+      )
     }
   } finally {
-    sh(
-      script: 'rm -f ~/.docker/config.json',
-      label: 'Clean up login credentials'
-    )
+    withEnv([
+      "AWS_ACCOUNT_ID=${aws_account_id}",
+      'AWS_DEFAULT_REGION={{ aws_default_region }}',
+      "AWS_ECR_REPO=${aws_account_id}.{{ aws_ecr_url }}"]) {
+      sh(
+        script: 'docker logout $AWS_ECR_REPO',
+        label: 'Clean up login credentials'
+      )
+    }
   }
+}
+
+
+def build_image(image_name) {
+  hash = sh(
+    returnStdout: true,
+    script: 'git log -1 --format=\'%h\''
+  ).trim()
+  def full_name = 
"${image_name}:${env.BRANCH_NAME}-${hash}-${env.BUILD_NUMBER}"
   sh(
-    script: "docker rmi ${full_name}",
-    label: 'Remove docker image'
+    script: "${docker_build} ${image_name} --spec ${full_name}",
+    label: 'Build docker image'
   )
+  return ecr_push(full_name)
 }
 
 if (rebuild_docker_images) {
   stage('Docker Image Build') {
     // TODO in a follow up PR: Find ecr tag and use in subsequent builds
-    parallel 'ci-lint': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_lint')
-        }
-      }
-    }, 'ci-cpu': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_cpu')
-        }
-      }
-    }, 'ci-gpu': {
-      node('GPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_gpu')
-        }
-      }
-    }, 'ci-qemu': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_qemu')
-        }
-      }
-    }, 'ci-i386': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_i386')
-        }
-      }
-    }, 'ci-arm': {
-      node('ARM') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_arm')
-        }
-      }
-    }, 'ci-wasm': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_wasm')
-        }
-      }
-    }, 'ci-hexagon': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_hexagon')
+    parallel(
+    {% for image in images %}
+      '{{ image.name }}': {
+        node('{{ image.platform }}') {
+          timeout(time: max_time, unit: 'MINUTES') {
+            init_git()
+            {{ image.name }} = build_image('{{ image.name }}')
+          }
         }
-      }
-    }
+      },
+    {% endfor %}
+    )
   }
-  // // TODO: Once we are able to use the built images, enable this step
-  // // If the docker images changed, we need to run the image build before 
the lint
-  // // can run since it requires a base docker image. Most of the time the 
images
-  // // aren't build though so it's faster to use the same node that checks for
-  // // docker changes to run the lint in the usual case.
-  // stage('Sanity Check (re-run)') {
-  //   timeout(time: max_time, unit: 'MINUTES') {
-  //     node('CPU') {
-  //       ws({{ m.per_exec_ws('tvm/sanity') }}) {
-  //         init_git()
-  //         sh (
-  //           script: "${docker_run} ${ci_lint}  
./tests/scripts/task_lint.sh",
-  //           label: 'Run lint',
-  //         )
-  //       }
-  //     }
-  //   }
-  // }
 }
 
+// Run the lint with the new Docker image before continuing to builds
+run_lint()
+
 // Run make. First try to do an incremental make from a previous workspace in 
hope to
 // accelerate the compilation. If something is wrong, clean the workspace and 
then
 // build from scratch.
@@ -482,16 +477,31 @@ def cpp_unittest(image) {
   )
 }
 
+def docker_init(image) {
+  if (image.contains("amazonaws.com")) {
+    // If this string is in the image name it's from ECR and needs to be pulled
+    // with the right credentials
+    ecr_pull(image)
+  } else {
+    sh(
+      script: "docker pull ${image}",
+      label: 'Pull docker image',
+    )
+  }
+}
+
 def build() {
 stage('Build') {
   environment {
     SKIP_SLOW_TESTS = "${skip_slow_tests}"
   }
-  parallel 'BUILD: GPU': {
+  parallel(
+    'BUILD: GPU': {
     if (!skip_ci) {
       node('CPU-SMALL') {
         ws({{ m.per_exec_ws('tvm/build-gpu') }}) {
           init_git()
+          docker_init(ci_gpu)
           sh "${docker_run} --no-gpu ${ci_gpu} 
./tests/scripts/task_config_build_gpu.sh build"
           make("${ci_gpu} --no-gpu", 'build', '-j2')
           pack_lib('gpu', tvm_multilib)
@@ -509,6 +519,7 @@ stage('Build') {
       node('CPU-SMALL') {
         ws({{ m.per_exec_ws('tvm/build-cpu') }}) {
           init_git()
+          docker_init(ci_cpu)
           sh (
             script: "${docker_run} ${ci_cpu} 
./tests/scripts/task_config_build_cpu.sh build",
             label: 'Create CPU cmake config',
@@ -532,6 +543,7 @@ stage('Build') {
       node('CPU-SMALL') {
         ws({{ m.per_exec_ws('tvm/build-wasm') }}) {
           init_git()
+          docker_init(ci_wasm)
           sh (
             script: "${docker_run} ${ci_wasm} 
./tests/scripts/task_config_build_wasm.sh build",
             label: 'Create WASM cmake config',
@@ -556,6 +568,7 @@ stage('Build') {
       node('CPU-SMALL') {
         ws({{ m.per_exec_ws('tvm/build-i386') }}) {
           init_git()
+          docker_init(ci_i386)
           sh (
             script: "${docker_run} ${ci_i386} 
./tests/scripts/task_config_build_i386.sh build",
             label: 'Create i386 cmake config',
@@ -573,6 +586,7 @@ stage('Build') {
       node('ARM') {
         ws({{ m.per_exec_ws('tvm/build-arm') }}) {
           init_git()
+          docker_init(ci_arm)
           sh (
             script: "${docker_run} ${ci_arm} 
./tests/scripts/task_config_build_arm.sh build",
             label: 'Create ARM cmake config',
@@ -590,6 +604,7 @@ stage('Build') {
       node('CPU-SMALL') {
         ws({{ m.per_exec_ws('tvm/build-qemu') }}) {
           init_git()
+          docker_init(ci_qemu)
           sh (
             script: "${docker_run} ${ci_qemu} 
./tests/scripts/task_config_build_qemu.sh build",
             label: 'Create QEMU cmake config',
@@ -608,6 +623,7 @@ stage('Build') {
       node('CPU-SMALL') {
         ws({{ m.per_exec_ws('tvm/build-hexagon') }}) {
           init_git()
+          docker_init(ci_hexagon)
           sh (
             script: "${docker_run} ${ci_hexagon} 
./tests/scripts/task_config_build_hexagon.sh build",
             label: 'Create Hexagon cmake config',
@@ -619,7 +635,8 @@ stage('Build') {
      } else {
       Utils.markStageSkippedForConditional('BUILD: Hexagon')
     }
-  }
+  },
+  )
 }
 }
 
@@ -638,6 +655,7 @@ stage('Test') {
     node="GPU",
     ws="tvm/ut-python-gpu",
     platform="gpu",
+    docker_image="ci_gpu",
   ) %}
     {% if shard_index == 1 %}
     unpack_lib('gpu2', tvm_multilib)
@@ -668,10 +686,11 @@ stage('Test') {
   {% call(shard_index, num_shards) m.sharded_test_step(
     name="integration: CPU",
     node="CPU",
-      num_shards=2,
-      ws="tvm/integration-python-cpu",
-      platform="cpu",
-    ) %}
+    num_shards=2,
+    ws="tvm/integration-python-cpu",
+    platform="cpu",
+    docker_image="ci_cpu",
+  ) %}
     unpack_lib('cpu', tvm_multilib_tsim)
     ci_setup(ci_cpu)
     sh (
@@ -684,6 +703,7 @@ stage('Test') {
     node="CPU-SMALL",
     ws="tvm/ut-python-cpu",
     platform="cpu",
+    docker_image="ci_cpu",
   ) %}
     unpack_lib('cpu', tvm_multilib_tsim)
     ci_setup(ci_cpu)
@@ -701,6 +721,7 @@ stage('Test') {
     num_shards=3,
     ws="tvm/integration-python-i386",
     platform="i386",
+    docker_image="ci_i386",
   ) %}
     unpack_lib('i386', tvm_multilib)
     ci_setup(ci_i386)
@@ -720,6 +741,7 @@ stage('Test') {
     ws="tvm/test-hexagon",
     platform="hexagon",
     num_shards=4,
+    docker_image="ci_hexagon",
   ) %}
     unpack_lib('hexagon', tvm_lib)
     ci_setup(ci_hexagon)
@@ -740,6 +762,7 @@ stage('Test') {
     node="CPU-SMALL",
     ws="tvm/test-qemu",
     platform="qemu",
+    docker_image="ci_qemu",
   ) %}
     unpack_lib('qemu', tvm_lib)
     unpack_microtvm_template_projects('qemu')
@@ -759,6 +782,7 @@ stage('Test') {
     node="ARM",
     ws="tvm/ut-python-arm",
     platform="arm",
+    docker_image="ci_arm",
 ) %}
     unpack_lib('arm', tvm_multilib)
     ci_setup(ci_arm)
@@ -777,6 +801,7 @@ stage('Test') {
     num_shards=2,
     node="ARM", ws="tvm/ut-python-arm",
     platform="arm",
+    docker_image="ci_arm",
   ) %}
     unpack_lib('arm', tvm_multilib)
     ci_setup(ci_arm)
@@ -792,6 +817,7 @@ stage('Test') {
     num_shards=2,
     ws="tvm/topi-python-gpu",
     platform="gpu",
+    docker_image="ci_gpu",
   ) %}
     unpack_lib('gpu', tvm_multilib)
     ci_setup(ci_gpu)
@@ -805,6 +831,7 @@ stage('Test') {
     num_shards=3,
     ws="tvm/frontend-python-gpu",
     platform="gpu",
+    docker_image="ci_gpu",
   ) %}
     unpack_lib('gpu', tvm_multilib)
     ci_setup(ci_gpu)
@@ -818,6 +845,7 @@ stage('Test') {
     node="CPU",
     ws="tvm/frontend-python-cpu",
     platform="cpu",
+    docker_image="ci_cpu",
 ) %}
     unpack_lib('cpu', tvm_multilib)
     ci_setup(ci_cpu)
@@ -831,6 +859,7 @@ stage('Test') {
     node="ARM",
     ws="tvm/frontend-python-arm",
     platform="arm",
+    docker_image="ci_arm",
 ) %}
     unpack_lib('arm', tvm_multilib)
     ci_setup(ci_arm)
@@ -844,6 +873,7 @@ stage('Test') {
       node('GPU') {
         ws({{ m.per_exec_ws('tvm/docs-python-gpu') }}) {
           init_git()
+          docker_init(ci_gpu)
           unpack_lib('gpu', tvm_multilib)
           unpack_microtvm_template_projects('gpu')
           timeout(time: 180, unit: 'MINUTES') {
@@ -924,6 +954,25 @@ def deploy_docs() {
   }
 }
 
+
+def update_docker(ecr_image, hub_image) {
+  if (!ecr_image.contains("amazonaws.com")) {
+    sh("echo Skipping '${ecr_image}' since it doesn't look like an ECR image")
+    return
+  }
+  sh(
+    script: """
+    set -eux
+    docker pull ${ecr_image}
+    docker tag \
+      ${ecr_image} \
+      ${hub_image}
+    docker push ${hub_image}
+    """,
+    label: "Update ${hub_image} on Docker Hub",
+  )
+}
+
 stage('Deploy') {
   if (env.BRANCH_NAME == 'main' && env.DOCS_DEPLOY_ENABLED == 'yes') {
     node('CPU') {
@@ -933,4 +982,36 @@ stage('Deploy') {
       }
     }
   }
+  // if (env.BRANCH_NAME == 'main' && rebuild_docker_images) {
+  if (env.BRANCH_NAME == 'PR-11329' && rebuild_docker_images && 
upstream_revision != null) {
+    node('CPU') {
+      ws({{ m.per_exec_ws('tvm/deploy-docker') }}) {
+        try {
+          withCredentials([string(
+            credentialsId: 'dockerhub-tlcpackstaging-key',
+            variable: 'DOCKERHUB_KEY',
+          )]) {
+            sh(
+              script: 'docker login -u tlcpackstaging -p ${DOCKERHUB_KEY}',
+              label: 'Log in to Docker Hub',
+            )
+          }
+          def date_Ymd_HMS = sh(
+            script: 'python -c \'import datetime; 
print(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))\'',
+            label: 'Determine date',
+            returnStdout: true,
+          ).trim()
+          def tag = "${date_Ymd_HMS}-${upstream_revision.substring(0, 8)}"
+          {% for image in images %}
+          update_docker({{ image.name }}, "tlcpackstaging/test_{{ image.name 
}}:${tag}")
+          {% endfor %}
+        } finally {
+          sh(
+            script: 'docker logout',
+            label: 'Clean up login credentials'
+          )
+        }
+      }
+    }
+  }
 }
diff --git a/jenkins/macros.j2 b/jenkins/macros.j2
index de33a203f6..281cbb3c4d 100644
--- a/jenkins/macros.j2
+++ b/jenkins/macros.j2
@@ -19,7 +19,7 @@
   "workspace/exec_${env.EXECUTOR_NUMBER}/{{ folder }}"
 {%- endmacro -%}
 
-{% macro sharded_test_step(name, num_shards, node, ws, platform) %}
+{% macro sharded_test_step(name, num_shards, node, ws, platform, docker_image) 
%}
 {% for shard_index in range(1, num_shards + 1) %}
   '{{ name }} {{ shard_index }} of {{ num_shards }}': {
     if (!skip_ci && is_docs_only_build != 1) {
@@ -27,6 +27,7 @@
         ws({{ per_exec_ws(ws) }}) {
           try {
             init_git()
+            docker_init({{ docker_image }})
             timeout(time: max_time, unit: 'MINUTES') {
               withEnv([
                 'PLATFORM={{ platform }}',
@@ -47,12 +48,13 @@
 {% endfor %}
 {% endmacro %}
 
-{% macro sharded_lint_step(name, num_shards, node, ws) %}
+{% macro sharded_lint_step(name, num_shards, node, ws, docker_image) %}
 {% for shard_index in range(1, num_shards + 1) %}
   '{{ name }} {{ shard_index }} of {{ num_shards }}': {
     node('{{ node }}') {
       ws({{ per_exec_ws(ws) }}) {
         init_git()
+        docker_init({{ docker_image }})
         timeout(time: max_time, unit: 'MINUTES') {
           withEnv([
             'TVM_NUM_SHARDS={{ num_shards }}',
@@ -67,7 +69,7 @@
 {% endmacro %}
 
 
-{% macro test_step(name, node, ws, platform) %}
+{% macro test_step(name, node, ws, platform, docker_image) %}
   '{{ name }}': {
     if (!skip_ci && is_docs_only_build != 1) {
       node('{{ node }}') {
@@ -75,6 +77,7 @@
           timeout(time: max_time, unit: 'MINUTES') {
             try {
               init_git()
+              docker_init({{ docker_image }})
               withEnv(['PLATFORM={{ platform }}'], {
                 {{ caller() | indent(width=12) | trim }}
               })
diff --git a/tests/python/ci/test_ci.py b/tests/python/ci/test_ci.py
index e197d7e48a..b412a7067a 100644
--- a/tests/python/ci/test_ci.py
+++ b/tests/python/ci/test_ci.py
@@ -18,8 +18,10 @@
 import subprocess
 import sys
 import json
+from tempfile import tempdir
 import textwrap
 import pytest
+from pathlib import Path
 
 from test_utils import REPO_ROOT
 
@@ -28,11 +30,13 @@ class TempGit:
     def __init__(self, cwd):
         self.cwd = cwd
 
-    def run(self, *args):
-        proc = subprocess.run(["git"] + list(args), cwd=self.cwd)
+    def run(self, *args, **kwargs):
+        proc = subprocess.run(["git"] + list(args), encoding="utf-8", 
cwd=self.cwd, **kwargs)
         if proc.returncode != 0:
             raise RuntimeError(f"git command failed: '{args}'")
 
+        return proc
+
 
 def test_cc_reviewers(tmpdir_factory):
     reviewers_script = REPO_ROOT / "tests" / "scripts" / 
"github_cc_reviewers.py"
@@ -731,5 +735,92 @@ def test_github_tag_teams(tmpdir_factory):
     )
 
 
+@pytest.mark.parametrize(
+    "changed_files,name,check,expected_code",
+    [
+        d.values()
+        for d in [
+            dict(
+                changed_files=[],
+                name="abc",
+                check="Image abc is not using new naming scheme",
+                expected_code=1,
+            ),
+            dict(
+                changed_files=[], name="123-123-abc", check="No extant hash 
found", expected_code=1
+            ),
+            dict(
+                changed_files=[["test.txt"]],
+                name=None,
+                check="Did not find changes, no rebuild necessary",
+                expected_code=0,
+            ),
+            dict(
+                changed_files=[["test.txt"], ["docker/test.txt"]],
+                name=None,
+                check="Found docker changes",
+                expected_code=2,
+            ),
+        ]
+    ],
+)
+def test_should_rebuild_docker(tmpdir_factory, changed_files, name, check, 
expected_code):
+    tag_script = REPO_ROOT / "tests" / "scripts" / "should_rebuild_docker.py"
+
+    git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
+    git.run("init")
+    git.run("checkout", "-b", "main")
+    git.run("remote", "add", "origin", "https://github.com/apache/tvm.git")
+
+    git_path = Path(git.cwd)
+    for i, commits in enumerate(changed_files):
+        for filename in commits:
+            path = git_path / filename
+            path.parent.mkdir(exist_ok=True, parents=True)
+            path.touch()
+            git.run("add", filename)
+
+        git.run("commit", "-m", f"message {i}")
+
+    if name is None:
+        ref = "HEAD"
+        if len(changed_files) > 1:
+            ref = f"HEAD~{len(changed_files) - 1}"
+        proc = git.run("rev-parse", ref, stdout=subprocess.PIPE)
+        last_hash = proc.stdout.strip()
+        name = f"123-123-{last_hash}"
+
+    docker_data = {
+        "repositories/tlcpack": {
+            "results": [
+                {
+                    "name": "ci-something",
+                },
+                {
+                    "name": "something-else",
+                },
+            ],
+        },
+        "repositories/tlcpack/ci-something/tags": {
+            "results": [{"name": name}, {"name": name + "old"}],
+        },
+    }
+
+    proc = subprocess.run(
+        [
+            str(tag_script),
+            "--testing-docker-data",
+            json.dumps(docker_data),
+        ],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+        encoding="utf-8",
+        cwd=git.cwd,
+    )
+
+    assert_in(check, proc.stdout)
+    assert proc.returncode == expected_code
+
+
 if __name__ == "__main__":
     sys.exit(pytest.main([__file__] + sys.argv[1:]))
diff --git a/tests/scripts/cmd_utils.py b/tests/scripts/cmd_utils.py
index 272086796e..771c3ee52d 100644
--- a/tests/scripts/cmd_utils.py
+++ b/tests/scripts/cmd_utils.py
@@ -44,18 +44,21 @@ def init_log():
 
 
 class Sh:
-    def __init__(self, env=None):
+    def __init__(self, env=None, cwd=None):
         self.env = os.environ.copy()
         if env is not None:
             self.env.update(env)
+        self.cwd = cwd
 
     def run(self, cmd: str, **kwargs):
         logging.info(f"+ {cmd}")
-        if "check" not in kwargs:
-            kwargs["check"] = True
-        if "shell" not in kwargs:
-            kwargs["shell"] = True
-        if "env" not in kwargs:
-            kwargs["env"] = self.env
-
-        subprocess.run(cmd, **kwargs)
+        defaults = {
+            "check": True,
+            "shell": True,
+            "env": self.env,
+            "encoding": "utf-8",
+            "cwd": self.cwd,
+        }
+        defaults.update(kwargs)
+
+        return subprocess.run(cmd, **defaults)
diff --git a/tests/scripts/git_utils.py b/tests/scripts/git_utils.py
index bc00bdf127..1fceb908ed 100644
--- a/tests/scripts/git_utils.py
+++ b/tests/scripts/git_utils.py
@@ -19,6 +19,7 @@
 import json
 import subprocess
 import re
+import logging
 from urllib import request
 from typing import Dict, Tuple, Any, Optional, List
 
diff --git a/tests/scripts/http_utils.py b/tests/scripts/http_utils.py
new file mode 100644
index 0000000000..c14259479d
--- /dev/null
+++ b/tests/scripts/http_utils.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import json
+import logging
+from urllib import request
+from typing import Dict, Any, Optional, Tuple
+
+
+def get(url: str, headers: Optional[Dict[str, str]] = None) -> Tuple[Dict[str, Any], Dict[str, str]]:
+    logging.info(f"Requesting GET to {url}")
+    if headers is None:
+        headers = {}
+    req = request.Request(url, headers=headers)
+    with request.urlopen(req) as response:
+        response_headers = {k: v for k, v in response.getheaders()}
+        response = json.loads(response.read())
+
+    return response, response_headers
diff --git a/tests/scripts/should_rebuild_docker.py 
b/tests/scripts/should_rebuild_docker.py
new file mode 100755
index 0000000000..dc12c38de8
--- /dev/null
+++ b/tests/scripts/should_rebuild_docker.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
import argparse
import datetime
import json
import logging
import subprocess
from typing import Any, Dict, List, Tuple

from cmd_utils import Sh, init_log
from http_utils import get
+
+
# Public Docker Hub v2 REST endpoint base.
DOCKER_API_BASE = "https://hub.docker.com/v2/"
PAGE_SIZE = 25
# When set (via --testing-docker-data), short-circuits API calls with canned data.
TEST_DATA = None


def docker_api(url: str) -> Dict[str, Any]:
    """
    Run a paginated fetch from the public Docker Hub API
    """
    # Under the test harness, answer from the injected fixture instead of
    # hitting the network.
    if TEST_DATA is not None:
        return TEST_DATA[url]

    full_url = f"{DOCKER_API_BASE}{url}?page_size={PAGE_SIZE}&page=1"
    data, response_headers = get(full_url)

    # Surface Docker Hub's rate-limit headers so CI logs show quota usage.
    reset = response_headers.get("x-ratelimit-reset")
    if reset is not None:
        reset = datetime.datetime.fromtimestamp(int(reset)).isoformat()
    logging.info(
        f"Docker API Rate Limit: {response_headers.get('x-ratelimit-remaining')} / {response_headers.get('x-ratelimit-limit')} (reset at {reset})"
    )

    if "results" not in data:
        raise RuntimeError(f"Error fetching data, no results found in: {data}")
    return data
+
+
def any_docker_changes_since(hash: str) -> Tuple[bool, str]:
    """
    Check the docker/ directory for code changes since the specified hash.

    Parameters
    ----------
    hash:
        A git revision to diff against.

    Returns
    -------
    A ``(changed, diff)`` tuple: ``changed`` is True when ``git diff`` reports
    any difference under docker/ relative to ``hash``, and ``diff`` is the
    captured diff text (empty string when nothing changed).  (Fixed: the
    original annotated the return as plain ``bool`` but returned this tuple.)
    """
    sh = Sh()
    cmd = f"git diff {hash} -- docker/"
    # stderr folded into stdout so any git complaint shows up in `diff` too.
    proc = sh.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout = proc.stdout.strip()
    # Any non-empty diff output means docker/ changed since `hash`.
    return stdout != "", stdout
+
+
def does_commit_exist(hash: str) -> bool:
    """
    Returns True if the hash exists in the repo, False if git reports it as an
    unknown revision; raises RuntimeError on any other git failure.
    """
    sh = Sh()
    cmd = f"git rev-parse -q {hash}"
    # check=False: a missing commit is an expected outcome here, not an error.
    proc = sh.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=False)
    # Fixed: use logging instead of a leftover debug print() so the output is
    # formatted consistently with the rest of this script.
    logging.info(proc.stdout)
    if proc.returncode == 0:
        return True

    if "unknown revision or path not in the working tree" in proc.stdout:
        return False

    # Non-zero exit with an unrecognized message: surface it loudly.
    raise RuntimeError(f"Unexpected failure when running: {cmd}")
+
+
def find_hash_for_tag(tag: Dict[str, Any]) -> str:
    """
    Extract the trailing git short hash from a tag named <date>-<time>-<hash>.

    Raises RuntimeError when the tag name does not have exactly three
    dash-separated components (i.e. predates the naming scheme).
    """
    name = tag["name"]
    parts = name.split("-")
    if len(parts) != 3:
        raise RuntimeError(f"Image {name} is not using new naming scheme")
    # The short git hash is the final component.
    return parts[-1]
+
+
def find_commit_in_repo(tags: Dict[str, Any]):
    """
    Look through all the docker tags, find the most recent one which references
    a commit that is present in the repo.

    Parameters
    ----------
    tags:
        A Docker Hub tags API response; the tag objects live under "results".
        (Fixed: the original annotation said ``List[...]`` but the function
        indexes ``tags["results"]``, so it actually receives the response dict.)

    Returns
    -------
    A ``(shorthash, tag)`` tuple for the first (i.e. most recent) tag whose
    embedded commit exists in the local repo.

    Raises
    ------
    RuntimeError if no tag references a commit present in the repo.
    """
    for tag in tags["results"]:
        shorthash = find_hash_for_tag(tag)
        if does_commit_exist(shorthash):
            return shorthash, tag
        # Fixed: this message was previously logged unconditionally BEFORE the
        # existence check, falsely claiming even present hashes were missing.
        logging.info(f"Hash '{shorthash}' does not exist in repo")

    raise RuntimeError(f"No extant hash found in tags:\n{tags}")
+
+
def main():
    """
    Check every tlcpack ci-* image on Docker Hub and exit with status 2 if
    docker/ has changed since the most recent published tag of any image,
    or status 0 if no rebuild is necessary.
    """
    # Fetch all tlcpack images
    images = docker_api("repositories/tlcpack")

    # Ignore all non-ci images
    relevant_images = [image for image in images["results"] if image["name"].startswith("ci-")]
    image_names = [image["name"] for image in relevant_images]
    logging.info(f"Found {len(relevant_images)} images to check: {', '.join(image_names)}")

    for image in relevant_images:
        # Check the tags for the image
        tags = docker_api(f"repositories/tlcpack/{image['name']}/tags")

        # Find the hash of the most recent tag
        shorthash, tag = find_commit_in_repo(tags)
        name = tag["name"]
        logging.info(f"Looking for docker/ changes since {shorthash}")

        any_docker_changes, diff = any_docker_changes_since(shorthash)
        if any_docker_changes:
            logging.info(f"Found docker changes from {shorthash} when checking {name}")
            logging.info(diff)
            # Fixed: raise SystemExit rather than calling the site-injected
            # exit() builtin, which is not guaranteed to exist (e.g. python -S).
            raise SystemExit(2)

    logging.info("Did not find changes, no rebuild necessary")
    raise SystemExit(0)
+
+
if __name__ == "__main__":
    init_log()
    parser = argparse.ArgumentParser(
        # Fixed: main() exits with status 2 (not 1) when a rebuild is needed,
        # so the help text now matches the actual behavior.
        description="Exits 0 if Docker images don't need to be rebuilt, 2 otherwise"
    )
    parser.add_argument(
        "--testing-docker-data",
        help="(testing only) JSON data to mock response from Docker Hub API",
    )
    args = parser.parse_args()

    # Let tests inject canned Docker Hub responses instead of hitting the API.
    if args.testing_docker_data is not None:
        TEST_DATA = json.loads(args.testing_docker_data)

    main()

Reply via email to