This is an automated email from the ASF dual-hosted git repository.

areusch pushed a commit to branch ci-docker-staging
in repository https://gitbox.apache.org/repos/asf/tvm.git

commit 4798718234d70f75ec889e1066e31e07c8a1a4bb
Author: Andrew Reusch <[email protected]>
AuthorDate: Thu Apr 28 09:53:15 2022 -0700

    Make microtvm_template_projects available in tutorials.
---
 Jenkinsfile            | 61 ++++++++++++++++++++++++++++++++------------------
 jenkins/Jenkinsfile.j2 | 61 ++++++++++++++++++++++++++++++++------------------
 2 files changed, 78 insertions(+), 44 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 824edeac4f..abbe05e0eb 100755
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -45,7 +45,7 @@
 // 'python3 jenkins/generate.py'
 // Note: This timestamp is here to ensure that updates to the Jenkinsfile are
 // always rebased on main before merging:
-// Generated at 2022-04-27T09:06:39.799194
+// Generated at 2022-04-28T10:46:03.315101
 
 import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
 // NOTE: these lines are scanned by docker/dev_common.sh. Please update the 
regex as needed. -->
@@ -75,17 +75,8 @@ properties([
   ])
 ])
 
-// tvm libraries
-tvm_runtime = 'build/libtvm_runtime.so, build/config.cmake'
-tvm_lib = 'build/libtvm.so, ' + tvm_runtime
-// LLVM upstream lib
-tvm_multilib = 'build/libtvm.so, ' +
-               'build/libvta_fsim.so, ' +
-               tvm_runtime
-
-tvm_multilib_tsim = 'build/libvta_tsim.so, ' +
-               tvm_multilib
-microtvm_lib = 'build/microtvm_template_projects.tar.gz, ' + tvm_lib
+// Global variable assigned during Sanity Check that holds the sha1 which 
should be
+// merged into the PR in all branches.
 upstream_revision = null
 
 // command to start a docker container
@@ -418,6 +409,19 @@ def make(docker_type, path, make_flag) {
   }
 }
 
+// Specifications to Jenkins "stash" command for use with various pack_ and 
unpack_ functions.
+tvm_runtime = 'build/libtvm_runtime.so, build/config.cmake'  // use 
libtvm_runtime.so.
+tvm_lib = 'build/libtvm.so, ' + tvm_runtime  // use libtvm.so to run the full 
compiler.
+// LLVM upstream lib
+tvm_multilib = 'build/libtvm.so, ' +
+               'build/libvta_fsim.so, ' +
+               tvm_runtime
+
+tvm_multilib_tsim = 'build/libvta_tsim.so, ' +
+                    tvm_multilib
+
+microtvm_tar_gz = 'build/microtvm_template_projects.tar.gz'
+
 // pack libraries for later use
 def pack_lib(name, libs) {
   sh (script: """
@@ -436,6 +440,23 @@ def unpack_lib(name, libs) {
      """, label: 'Unstash libraries and show md5')
 }
 
+// compress microtvm template projects and pack the tar.
+def pack_microtvm_template_projects(name) {
+  sh(
+    script: 'cd build && tar -czvf microtvm_template_projects.tar.gz 
microtvm_template_projects/',
+    label: 'Compress microtvm_template_projects'
+  )
+  pack_lib(name + '-microtvm-libs', microtvm_tar_gz)
+}
+
+def unpack_microtvm_template_projects(name) {
+  unpack_lib(name + '-microtvm-libs', microtvm_tar_gz)
+  sh(
+    script: 'cd build && tar -xzvf microtvm_template_projects.tar.gz',
+    label: 'Unpack microtvm_template_projects'
+  )
+}
+
 def ci_setup(image) {
   sh (
     script: "${docker_run} ${image} ./tests/scripts/task_ci_setup.sh",
@@ -484,6 +505,7 @@ stage('Build') {
           sh "${docker_run} --no-gpu ${ci_gpu} 
./tests/scripts/task_config_build_gpu.sh build"
           make("${ci_gpu} --no-gpu", 'build', '-j2')
           pack_lib('gpu', tvm_multilib)
+          pack_microtvm_template_projects('gpu')
           // compiler test
           sh "${docker_run} --no-gpu ${ci_gpu} 
./tests/scripts/task_config_build_gpu_other.sh build2"
           make("${ci_gpu} --no-gpu", 'build2', '-j2')
@@ -583,11 +605,8 @@ stage('Build') {
             label: 'Create QEMU cmake config',
           )
           make(ci_qemu, 'build', '-j2')
-          sh(
-            script: 'cd build && tar -czvf microtvm_template_projects.tar.gz 
microtvm_template_projects/',
-            label: 'Compress microtvm_template_projects'
-          )
-          pack_lib('qemu', microtvm_lib)
+          pack_lib('qemu', tvm_lib)
+          pack_microtvm_template_projects('qemu')
         }
       }
      } else {
@@ -986,11 +1005,8 @@ stage('Test') {
             try {
               init_git()
               withEnv(['PLATFORM=qemu'], {
-                unpack_lib('qemu', microtvm_lib)
-                sh(
-                  script: 'cd build && tar -xzvf 
microtvm_template_projects.tar.gz',
-                  label: 'Unpack microtvm_template_projects'
-                )
+                unpack_lib('qemu', tvm_lib)
+                unpack_microtvm_template_projects('qemu')
                 ci_setup(ci_qemu)
                 cpp_unittest(ci_qemu)
                 sh (
@@ -1296,6 +1312,7 @@ stage('Test') {
         ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/docs-python-gpu") {
           init_git()
           unpack_lib('gpu', tvm_multilib)
+          unpack_microtvm_template_projects('gpu')
           timeout(time: 180, unit: 'MINUTES') {
             ci_setup(ci_gpu)
             sh (
diff --git a/jenkins/Jenkinsfile.j2 b/jenkins/Jenkinsfile.j2
index 0d2d91ad91..e3d0124b70 100644
--- a/jenkins/Jenkinsfile.j2
+++ b/jenkins/Jenkinsfile.j2
@@ -72,17 +72,8 @@ properties([
   ])
 ])
 
-// tvm libraries
-tvm_runtime = 'build/libtvm_runtime.so, build/config.cmake'
-tvm_lib = 'build/libtvm.so, ' + tvm_runtime
-// LLVM upstream lib
-tvm_multilib = 'build/libtvm.so, ' +
-               'build/libvta_fsim.so, ' +
-               tvm_runtime
-
-tvm_multilib_tsim = 'build/libvta_tsim.so, ' +
-               tvm_multilib
-microtvm_lib = 'build/microtvm_template_projects.tar.gz, ' + tvm_lib
+// Global variable assigned during Sanity Check that holds the sha1 which 
should be
+// merged into the PR in all branches.
 upstream_revision = null
 
 // command to start a docker container
@@ -415,6 +406,19 @@ def make(docker_type, path, make_flag) {
   }
 }
 
+// Specifications to Jenkins "stash" command for use with various pack_ and 
unpack_ functions.
+tvm_runtime = 'build/libtvm_runtime.so, build/config.cmake'  // use 
libtvm_runtime.so.
+tvm_lib = 'build/libtvm.so, ' + tvm_runtime  // use libtvm.so to run the full 
compiler.
+// LLVM upstream lib
+tvm_multilib = 'build/libtvm.so, ' +
+               'build/libvta_fsim.so, ' +
+               tvm_runtime
+
+tvm_multilib_tsim = 'build/libvta_tsim.so, ' +
+                    tvm_multilib
+
+microtvm_tar_gz = 'build/microtvm_template_projects.tar.gz'
+
 // pack libraries for later use
 def pack_lib(name, libs) {
   sh (script: """
@@ -433,6 +437,23 @@ def unpack_lib(name, libs) {
      """, label: 'Unstash libraries and show md5')
 }
 
+// compress microtvm template projects and pack the tar.
+def pack_microtvm_template_projects(name) {
+  sh(
+    script: 'cd build && tar -czvf microtvm_template_projects.tar.gz 
microtvm_template_projects/',
+    label: 'Compress microtvm_template_projects'
+  )
+  pack_lib(name + '-microtvm-libs', microtvm_tar_gz)
+}
+
+def unpack_microtvm_template_projects(name) {
+  unpack_lib(name + '-microtvm-libs', microtvm_tar_gz)
+  sh(
+    script: 'cd build && tar -xzvf microtvm_template_projects.tar.gz',
+    label: 'Unpack microtvm_template_projects'
+  )
+}
+
 def ci_setup(image) {
   sh (
     script: "${docker_run} ${image} ./tests/scripts/task_ci_setup.sh",
@@ -481,6 +502,7 @@ stage('Build') {
           sh "${docker_run} --no-gpu ${ci_gpu} 
./tests/scripts/task_config_build_gpu.sh build"
           make("${ci_gpu} --no-gpu", 'build', '-j2')
           pack_lib('gpu', tvm_multilib)
+          pack_microtvm_template_projects('gpu')
           // compiler test
           sh "${docker_run} --no-gpu ${ci_gpu} 
./tests/scripts/task_config_build_gpu_other.sh build2"
           make("${ci_gpu} --no-gpu", 'build2', '-j2')
@@ -580,11 +602,8 @@ stage('Build') {
             label: 'Create QEMU cmake config',
           )
           make(ci_qemu, 'build', '-j2')
-          sh(
-            script: 'cd build && tar -czvf microtvm_template_projects.tar.gz 
microtvm_template_projects/',
-            label: 'Compress microtvm_template_projects'
-          )
-          pack_lib('qemu', microtvm_lib)
+          pack_lib('qemu', tvm_lib)
+          pack_microtvm_template_projects('qemu')
         }
       }
      } else {
@@ -717,11 +736,8 @@ stage('Test') {
     node="CPU", ws="tvm/test-qemu",
     platform="qemu",
   ) %}
-    unpack_lib('qemu', microtvm_lib)
-    sh(
-      script: 'cd build && tar -xzvf microtvm_template_projects.tar.gz',
-      label: 'Unpack microtvm_template_projects'
-    )
+    unpack_lib('qemu', tvm_lib)
+    unpack_microtvm_template_projects('qemu')
     ci_setup(ci_qemu)
     cpp_unittest(ci_qemu)
     sh (
@@ -766,7 +782,7 @@ stage('Test') {
     )
   {% endcall %}
   {% call(shard_index) m.sharded_test_step(
-    name="topi: GPU", 
+    name="topi: GPU",
     node="GPU",
     num_shards=2,
     ws="tvm/topi-python-gpu",
@@ -824,6 +840,7 @@ stage('Test') {
         ws({{ m.per_exec_ws('tvm/docs-python-gpu') }}) {
           init_git()
           unpack_lib('gpu', tvm_multilib)
+          unpack_microtvm_template_projects('gpu')
           timeout(time: 180, unit: 'MINUTES') {
             ci_setup(ci_gpu)
             sh (

Reply via email to the mailing list.