This is an automated email from the ASF dual-hosted git repository.
tqchen pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 6f18e4ef77 [CI] Remove i386 and Hexagon from CI pipeline (1) (#18737)
6f18e4ef77 is described below
commit 6f18e4ef77fa45cc06ca33a2b9145b968a5cc0b7
Author: Masahiro Hiramori <[email protected]>
AuthorDate: Wed Feb 11 22:54:45 2026 +0900
[CI] Remove i386 and Hexagon from CI pipeline (1) (#18737)
Part of #18682
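
Note: the files under ci/jenkins/generated/ are not edited by hand; they are rendered from ci/jenkins/data.py and the templates in ci/jenkins/templates/. A minimal regeneration sketch, assuming the generator path named in the generated-file headers ('python3 jenkins/generate.py'; the repo-relative path may differ):

    # Edit ci/jenkins/data.py and/or the .j2 templates, then regenerate:
    python3 jenkins/generate.py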
---
ci/jenkins/data.py | 8 -
ci/jenkins/generated/arm_jenkinsfile.groovy | 24 +-
ci/jenkins/generated/cpu_jenkinsfile.groovy | 22 +-
ci/jenkins/generated/docker_jenkinsfile.groovy | 76 +-
ci/jenkins/generated/gpu_jenkinsfile.groovy | 24 +-
ci/jenkins/generated/hexagon_jenkinsfile.groovy | 571 ---------------
ci/jenkins/generated/i386_jenkinsfile.groovy | 765 ---------------------
ci/jenkins/generated/lint_jenkinsfile.groovy | 24 +-
ci/jenkins/generated/wasm_jenkinsfile.groovy | 24 +-
ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2 | 44 --
ci/jenkins/templates/i386_jenkinsfile.groovy.j2 | 60 --
ci/jenkins/templates/utils/base.groovy.j2 | 2 -
ci/scripts/github/github_tvmbot.py | 2 -
ci/scripts/github/update_branch.py | 2 -
tests/python/ci/test_ci.py | 2 -
.../codegen/test_target_codegen_cross_llvm.py | 9 -
tests/python/codegen/test_target_codegen_x86.py | 4 +-
tests/scripts/ci.py | 16 -
tests/scripts/task_build.py | 17 +-
tests/scripts/task_python_integration_i386only.sh | 23 -
20 files changed, 17 insertions(+), 1702 deletions(-)
diff --git a/ci/jenkins/data.py b/ci/jenkins/data.py
index 5a96d4889d..4292aa479f 100644
--- a/ci/jenkins/data.py
+++ b/ci/jenkins/data.py
@@ -62,14 +62,6 @@ docker_images = {
"tag": "tlcpack/ci-gpu:20251130-061900-c429a2b1",
"platform": "GPU",
},
- "ci_hexagon": {
- "tag": "tlcpack/ci-hexagon:20251130-061900-c429a2b1",
- "platform": "CPU",
- },
- "ci_i386": {
- "tag": "tlcpack/ci-i386:20251130-061900-c429a2b1",
- "platform": "CPU",
- },
"ci_lint": {
"tag": "tlcpack/ci-lint:20251130-061900-c429a2b1",
"platform": "CPU",
diff --git a/ci/jenkins/generated/arm_jenkinsfile.groovy b/ci/jenkins/generated/arm_jenkinsfile.groovy
index b58ec70221..6309d6d477 100644
--- a/ci/jenkins/generated/arm_jenkinsfile.groovy
+++ b/ci/jenkins/generated/arm_jenkinsfile.groovy
@@ -60,7 +60,7 @@
// 'python3 jenkins/generate.py'
// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
// always rebased on main before merging:
-// Generated at 2025-08-24T16:41:22.350930
+// Generated at 2026-02-09T16:32:44.108985
import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
// These are set at runtime from data in ci/jenkins/docker-images.yml, update
@@ -70,10 +70,8 @@ ci_gpu = ''
ci_cpu = ''
ci_minimal = ''
ci_wasm = ''
-ci_i386 = ''
ci_cortexm = ''
ci_arm = ''
-ci_hexagon = ''
ci_riscv = ''
// Parameters to allow overriding (in Jenkins UI), the images
@@ -84,8 +82,6 @@ properties([
string(name: 'ci_arm_param', defaultValue: ''),
string(name: 'ci_cpu_param', defaultValue: ''),
string(name: 'ci_gpu_param', defaultValue: ''),
- string(name: 'ci_hexagon_param', defaultValue: ''),
- string(name: 'ci_i386_param', defaultValue: ''),
string(name: 'ci_lint_param', defaultValue: ''),
string(name: 'ci_wasm_param', defaultValue: ''),
])
@@ -96,8 +92,6 @@ properties([
built_ci_arm = null;
built_ci_cpu = null;
built_ci_gpu = null;
- built_ci_hexagon = null;
- built_ci_i386 = null;
built_ci_lint = null;
built_ci_wasm = null;
@@ -368,7 +362,7 @@ def prepare(node_type) {
if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
sh(
- script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_hexagon ci_i386 ci_lint ci_wasm ",
+ script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_lint ci_wasm ",
label: 'Decide whether to use tlcpack or tlcpackstaging for Docker
images',
)
// Pull image names from the results of should_rebuild_docker.py
@@ -387,16 +381,6 @@ def prepare(node_type) {
label: "Find docker image name for ci_gpu",
returnStdout: true,
).trim()
- ci_hexagon = sh(
- script: "cat .docker-image-names/ci_hexagon",
- label: "Find docker image name for ci_hexagon",
- returnStdout: true,
- ).trim()
- ci_i386 = sh(
- script: "cat .docker-image-names/ci_i386",
- label: "Find docker image name for ci_i386",
- returnStdout: true,
- ).trim()
ci_lint = sh(
script: "cat .docker-image-names/ci_lint",
label: "Find docker image name for ci_lint",
@@ -412,8 +396,6 @@ def prepare(node_type) {
ci_arm = params.ci_arm_param ?: ci_arm
ci_cpu = params.ci_cpu_param ?: ci_cpu
ci_gpu = params.ci_gpu_param ?: ci_gpu
- ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
- ci_i386 = params.ci_i386_param ?: ci_i386
ci_lint = params.ci_lint_param ?: ci_lint
ci_wasm = params.ci_wasm_param ?: ci_wasm
@@ -422,8 +404,6 @@ def prepare(node_type) {
echo " ci_arm = ${ci_arm}"
echo " ci_cpu = ${ci_cpu}"
echo " ci_gpu = ${ci_gpu}"
- echo " ci_hexagon = ${ci_hexagon}"
- echo " ci_i386 = ${ci_i386}"
echo " ci_lint = ${ci_lint}"
echo " ci_wasm = ${ci_wasm}"
""", label: 'Docker image names')
diff --git a/ci/jenkins/generated/cpu_jenkinsfile.groovy b/ci/jenkins/generated/cpu_jenkinsfile.groovy
index 53c74d1115..0a2f126411 100644
--- a/ci/jenkins/generated/cpu_jenkinsfile.groovy
+++ b/ci/jenkins/generated/cpu_jenkinsfile.groovy
@@ -70,10 +70,8 @@ ci_gpu = ''
ci_cpu = ''
ci_minimal = ''
ci_wasm = ''
-ci_i386 = ''
ci_cortexm = ''
ci_arm = ''
-ci_hexagon = ''
ci_riscv = ''
// Parameters to allow overriding (in Jenkins UI), the images
@@ -84,8 +82,6 @@ properties([
string(name: 'ci_arm_param', defaultValue: ''),
string(name: 'ci_cpu_param', defaultValue: ''),
string(name: 'ci_gpu_param', defaultValue: ''),
- string(name: 'ci_hexagon_param', defaultValue: ''),
- string(name: 'ci_i386_param', defaultValue: ''),
string(name: 'ci_lint_param', defaultValue: ''),
string(name: 'ci_wasm_param', defaultValue: ''),
])
@@ -96,8 +92,6 @@ properties([
built_ci_arm = null;
built_ci_cpu = null;
built_ci_gpu = null;
- built_ci_hexagon = null;
- built_ci_i386 = null;
built_ci_lint = null;
built_ci_wasm = null;
@@ -368,7 +362,7 @@ def prepare(node_type) {
if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
sh(
- script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_hexagon ci_i386 ci_lint ci_wasm ",
+ script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_lint ci_wasm ",
label: 'Decide whether to use tlcpack or tlcpackstaging for Docker
images',
)
// Pull image names from the results of should_rebuild_docker.py
@@ -387,16 +381,6 @@ def prepare(node_type) {
label: "Find docker image name for ci_gpu",
returnStdout: true,
).trim()
- ci_hexagon = sh(
- script: "cat .docker-image-names/ci_hexagon",
- label: "Find docker image name for ci_hexagon",
- returnStdout: true,
- ).trim()
- ci_i386 = sh(
- script: "cat .docker-image-names/ci_i386",
- label: "Find docker image name for ci_i386",
- returnStdout: true,
- ).trim()
ci_lint = sh(
script: "cat .docker-image-names/ci_lint",
label: "Find docker image name for ci_lint",
@@ -412,8 +396,6 @@ def prepare(node_type) {
ci_arm = params.ci_arm_param ?: ci_arm
ci_cpu = params.ci_cpu_param ?: ci_cpu
ci_gpu = params.ci_gpu_param ?: ci_gpu
- ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
- ci_i386 = params.ci_i386_param ?: ci_i386
ci_lint = params.ci_lint_param ?: ci_lint
ci_wasm = params.ci_wasm_param ?: ci_wasm
@@ -422,8 +404,6 @@ def prepare(node_type) {
echo " ci_arm = ${ci_arm}"
echo " ci_cpu = ${ci_cpu}"
echo " ci_gpu = ${ci_gpu}"
- echo " ci_hexagon = ${ci_hexagon}"
- echo " ci_i386 = ${ci_i386}"
echo " ci_lint = ${ci_lint}"
echo " ci_wasm = ${ci_wasm}"
""", label: 'Docker image names')
diff --git a/ci/jenkins/generated/docker_jenkinsfile.groovy b/ci/jenkins/generated/docker_jenkinsfile.groovy
index 2391d9a87a..d91e5107f4 100644
--- a/ci/jenkins/generated/docker_jenkinsfile.groovy
+++ b/ci/jenkins/generated/docker_jenkinsfile.groovy
@@ -60,7 +60,7 @@
// 'python3 jenkins/generate.py'
// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
// always rebased on main before merging:
-// Generated at 2025-06-03T18:16:35.797894
+// Generated at 2026-02-09T16:32:44.070917
import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
// These are set at runtime from data in ci/jenkins/docker-images.yml, update
@@ -70,10 +70,8 @@ ci_gpu = ''
ci_cpu = ''
ci_minimal = ''
ci_wasm = ''
-ci_i386 = ''
ci_cortexm = ''
ci_arm = ''
-ci_hexagon = ''
ci_riscv = ''
// Parameters to allow overriding (in Jenkins UI), the images
@@ -84,8 +82,6 @@ properties([
string(name: 'ci_arm_param', defaultValue: ''),
string(name: 'ci_cpu_param', defaultValue: ''),
string(name: 'ci_gpu_param', defaultValue: ''),
- string(name: 'ci_hexagon_param', defaultValue: ''),
- string(name: 'ci_i386_param', defaultValue: ''),
string(name: 'ci_lint_param', defaultValue: ''),
string(name: 'ci_wasm_param', defaultValue: ''),
])
@@ -96,8 +92,6 @@ properties([
built_ci_arm = null;
built_ci_cpu = null;
built_ci_gpu = null;
- built_ci_hexagon = null;
- built_ci_i386 = null;
built_ci_lint = null;
built_ci_wasm = null;
@@ -368,7 +362,7 @@ def prepare(node_type) {
if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
sh(
- script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_hexagon ci_i386 ci_lint ci_wasm ",
+ script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_lint ci_wasm ",
label: 'Decide whether to use tlcpack or tlcpackstaging for Docker
images',
)
// Pull image names from the results of should_rebuild_docker.py
@@ -387,16 +381,6 @@ def prepare(node_type) {
label: "Find docker image name for ci_gpu",
returnStdout: true,
).trim()
- ci_hexagon = sh(
- script: "cat .docker-image-names/ci_hexagon",
- label: "Find docker image name for ci_hexagon",
- returnStdout: true,
- ).trim()
- ci_i386 = sh(
- script: "cat .docker-image-names/ci_i386",
- label: "Find docker image name for ci_i386",
- returnStdout: true,
- ).trim()
ci_lint = sh(
script: "cat .docker-image-names/ci_lint",
label: "Find docker image name for ci_lint",
@@ -412,8 +396,6 @@ def prepare(node_type) {
ci_arm = params.ci_arm_param ?: ci_arm
ci_cpu = params.ci_cpu_param ?: ci_cpu
ci_gpu = params.ci_gpu_param ?: ci_gpu
- ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
- ci_i386 = params.ci_i386_param ?: ci_i386
ci_lint = params.ci_lint_param ?: ci_lint
ci_wasm = params.ci_wasm_param ?: ci_wasm
@@ -422,8 +404,6 @@ def prepare(node_type) {
echo " ci_arm = ${ci_arm}"
echo " ci_cpu = ${ci_cpu}"
echo " ci_gpu = ${ci_gpu}"
- echo " ci_hexagon = ${ci_hexagon}"
- echo " ci_i386 = ${ci_i386}"
echo " ci_lint = ${ci_lint}"
echo " ci_wasm = ${ci_wasm}"
""", label: 'Docker image names')
@@ -606,8 +586,6 @@ def deploy() {
update_docker(built_ci_arm, "tlcpackstaging/ci_arm:${tag}")
update_docker(built_ci_cpu, "tlcpackstaging/ci_cpu:${tag}")
update_docker(built_ci_gpu, "tlcpackstaging/ci_gpu:${tag}")
- update_docker(built_ci_hexagon, "tlcpackstaging/ci_hexagon:${tag}")
- update_docker(built_ci_i386, "tlcpackstaging/ci_i386:${tag}")
update_docker(built_ci_lint, "tlcpackstaging/ci_lint:${tag}")
update_docker(built_ci_wasm, "tlcpackstaging/ci_wasm:${tag}")
} finally {
@@ -680,34 +658,6 @@ def deploy() {
label: 'Tag tlcpackstaging/ci_gpu image to tlcpack',
)
}
- if (ci_hexagon.contains("tlcpackstaging")) {
- // Push image to tlcpack
- def tag = ci_hexagon.split(":")[1]
- sh(
- script: """
- set -eux
- . ${jenkins_scripts_root}/retry.sh
- docker pull tlcpackstaging/ci_hexagon:${tag}
- docker tag tlcpackstaging/ci_hexagon:${tag} tlcpack/ci-hexagon:${tag}
- retry 5 docker push tlcpack/ci-hexagon:${tag}
- """,
- label: 'Tag tlcpackstaging/ci_hexagon image to tlcpack',
- )
- }
- if (ci_i386.contains("tlcpackstaging")) {
- // Push image to tlcpack
- def tag = ci_i386.split(":")[1]
- sh(
- script: """
- set -eux
- . ${jenkins_scripts_root}/retry.sh
- docker pull tlcpackstaging/ci_i386:${tag}
- docker tag tlcpackstaging/ci_i386:${tag} tlcpack/ci-i386:${tag}
- retry 5 docker push tlcpack/ci-i386:${tag}
- """,
- label: 'Tag tlcpackstaging/ci_i386 image to tlcpack',
- )
- }
if (ci_lint.contains("tlcpackstaging")) {
// Push image to tlcpack
def tag = ci_lint.split(":")[1]
@@ -793,28 +743,6 @@ if (rebuild_docker_images) {
}
}
},
- 'ci_hexagon': {
- node('CPU') {
- timeout(time: max_time, unit: 'MINUTES') {
- init_git()
- // We're purposefully not setting the built image here since they
- // are not yet being uploaded to tlcpack
- // ci_hexagon = build_image('ci_hexagon')
- built_ci_hexagon = build_image('ci_hexagon');
- }
- }
- },
- 'ci_i386': {
- node('CPU') {
- timeout(time: max_time, unit: 'MINUTES') {
- init_git()
- // We're purposefully not setting the built image here since they
- // are not yet being uploaded to tlcpack
- // ci_i386 = build_image('ci_i386')
- built_ci_i386 = build_image('ci_i386');
- }
- }
- },
'ci_lint': {
node('CPU') {
timeout(time: max_time, unit: 'MINUTES') {
diff --git a/ci/jenkins/generated/gpu_jenkinsfile.groovy b/ci/jenkins/generated/gpu_jenkinsfile.groovy
index e9ade66832..cc089aa56c 100644
--- a/ci/jenkins/generated/gpu_jenkinsfile.groovy
+++ b/ci/jenkins/generated/gpu_jenkinsfile.groovy
@@ -60,7 +60,7 @@
// 'python3 jenkins/generate.py'
// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
// always rebased on main before merging:
-// Generated at 2025-08-24T16:41:22.312666
+// Generated at 2026-02-09T16:32:44.095534
import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
// These are set at runtime from data in ci/jenkins/docker-images.yml, update
@@ -70,10 +70,8 @@ ci_gpu = ''
ci_cpu = ''
ci_minimal = ''
ci_wasm = ''
-ci_i386 = ''
ci_cortexm = ''
ci_arm = ''
-ci_hexagon = ''
ci_riscv = ''
// Parameters to allow overriding (in Jenkins UI), the images
@@ -84,8 +82,6 @@ properties([
string(name: 'ci_arm_param', defaultValue: ''),
string(name: 'ci_cpu_param', defaultValue: ''),
string(name: 'ci_gpu_param', defaultValue: ''),
- string(name: 'ci_hexagon_param', defaultValue: ''),
- string(name: 'ci_i386_param', defaultValue: ''),
string(name: 'ci_lint_param', defaultValue: ''),
string(name: 'ci_wasm_param', defaultValue: ''),
])
@@ -96,8 +92,6 @@ properties([
built_ci_arm = null;
built_ci_cpu = null;
built_ci_gpu = null;
- built_ci_hexagon = null;
- built_ci_i386 = null;
built_ci_lint = null;
built_ci_wasm = null;
@@ -368,7 +362,7 @@ def prepare(node_type) {
if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
sh(
- script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_hexagon ci_i386 ci_lint ci_wasm ",
+ script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_lint ci_wasm ",
label: 'Decide whether to use tlcpack or tlcpackstaging for Docker
images',
)
// Pull image names from the results of should_rebuild_docker.py
@@ -387,16 +381,6 @@ def prepare(node_type) {
label: "Find docker image name for ci_gpu",
returnStdout: true,
).trim()
- ci_hexagon = sh(
- script: "cat .docker-image-names/ci_hexagon",
- label: "Find docker image name for ci_hexagon",
- returnStdout: true,
- ).trim()
- ci_i386 = sh(
- script: "cat .docker-image-names/ci_i386",
- label: "Find docker image name for ci_i386",
- returnStdout: true,
- ).trim()
ci_lint = sh(
script: "cat .docker-image-names/ci_lint",
label: "Find docker image name for ci_lint",
@@ -412,8 +396,6 @@ def prepare(node_type) {
ci_arm = params.ci_arm_param ?: ci_arm
ci_cpu = params.ci_cpu_param ?: ci_cpu
ci_gpu = params.ci_gpu_param ?: ci_gpu
- ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
- ci_i386 = params.ci_i386_param ?: ci_i386
ci_lint = params.ci_lint_param ?: ci_lint
ci_wasm = params.ci_wasm_param ?: ci_wasm
@@ -422,8 +404,6 @@ def prepare(node_type) {
echo " ci_arm = ${ci_arm}"
echo " ci_cpu = ${ci_cpu}"
echo " ci_gpu = ${ci_gpu}"
- echo " ci_hexagon = ${ci_hexagon}"
- echo " ci_i386 = ${ci_i386}"
echo " ci_lint = ${ci_lint}"
echo " ci_wasm = ${ci_wasm}"
""", label: 'Docker image names')
diff --git a/ci/jenkins/generated/hexagon_jenkinsfile.groovy b/ci/jenkins/generated/hexagon_jenkinsfile.groovy
deleted file mode 100644
index 0047981011..0000000000
--- a/ci/jenkins/generated/hexagon_jenkinsfile.groovy
+++ /dev/null
@@ -1,571 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-// -*- mode: groovy -*-
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-// Jenkins pipeline
-// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
-
-// Docker env used for testing
-// Different image may have different version tag
-// because some of them are more stable than anoter.
-//
-// Docker images are maintained by PMC, cached in dockerhub
-// and remains relatively stable over the time.
-// Flow for upgrading docker env(need commiter)
-//
-// - Send PR to upgrade build script in the repo
-// - Build the new docker image
-// - Tag the docker image with a new version and push to a binary cache.
-// - Update the version in the Jenkinsfile, send a PR
-// - Fix any issues wrt to the new image version in the PR
-// - Merge the PR and now we are in new version
-// - Tag the new version as the lates
-// - Periodically cleanup the old versions on local workers
-//
-
-// ============================= IMPORTANT NOTE =============================
-// This file is generated by 'jenkins/generate.py'. Do not edit this file directly!
-// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with
-// 'python3 jenkins/generate.py'
-// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
-// always rebased on main before merging:
-// Generated at 2025-08-24T16:41:22.257116
-
-import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
-// These are set at runtime from data in ci/jenkins/docker-images.yml, update
-// image tags in that file
-ci_lint = ''
-ci_gpu = ''
-ci_cpu = ''
-ci_minimal = ''
-ci_wasm = ''
-ci_i386 = ''
-ci_cortexm = ''
-ci_arm = ''
-ci_hexagon = ''
-ci_riscv = ''
-
-// Parameters to allow overriding (in Jenkins UI), the images
-// to be used by a given build. When provided, they take precedence
-// over default values above.
-properties([
- parameters([
- string(name: 'ci_arm_param', defaultValue: ''),
- string(name: 'ci_cpu_param', defaultValue: ''),
- string(name: 'ci_gpu_param', defaultValue: ''),
- string(name: 'ci_hexagon_param', defaultValue: ''),
- string(name: 'ci_i386_param', defaultValue: ''),
- string(name: 'ci_lint_param', defaultValue: ''),
- string(name: 'ci_wasm_param', defaultValue: ''),
- ])
-])
-
-// Placeholders for newly built Docker image names (if rebuild_docker_images
-// is used)
- built_ci_arm = null;
- built_ci_cpu = null;
- built_ci_gpu = null;
- built_ci_hexagon = null;
- built_ci_i386 = null;
- built_ci_lint = null;
- built_ci_wasm = null;
-
-// Global variable assigned during Sanity Check that holds the sha1 which should be
-// merged into the PR in all branches.
-upstream_revision = null
-
-// command to start a docker container
-docker_run = 'docker/bash.sh --env CI --env PLATFORM --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME'
-docker_build = 'docker/build.sh'
-// timeout in minutes
-max_time = 180
-rebuild_docker_images = false
-
-s3_bucket = 'tvm-jenkins-artifacts-prod'
-s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}"
-
-// Jenkins script root directory
-jenkins_scripts_root = "ci/scripts/jenkins"
-
-
-// General note: Jenkins has limits on the size of a method (or top level code)
-// that are pretty strict, so most usage of groovy methods in these templates
-// are purely to satisfy the JVM
-def per_exec_ws(folder) {
- return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder
-}
-
-// initialize source codes
-def init_git() {
- retry(5) {
- checkout scm
- }
-
- // Add more info about job node
- sh (
- script: './tests/scripts/task_show_node_info.sh',
- label: 'Show executor node info',
- )
-
- // Determine merge commit to use for all stages
- if (env.BRANCH_NAME == 'main') {
- // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main.
- update_upstream_revision("HEAD")
- } else {
- // This is PR branch so merge with latest main.
- merge_with_main()
- }
-
- sh(
- script: """
- set -eux
- . ${jenkins_scripts_root}/retry.sh
- retry 3 timeout 5m git submodule update --init --recursive -f --jobs 0
- """,
- label: 'Update git submodules',
- )
- checkout_trusted_files()
-}
-
-def update_upstream_revision(git_ref) {
- if (upstream_revision == null) {
- upstream_revision = sh(
- script: "git log -1 ${git_ref} --format=\'%H\'",
- label: 'Determine upstream revision',
- returnStdout: true,
- ).trim()
- }
-}
-
-def merge_with_main() {
- sh (
- script: 'git fetch origin main',
- label: 'Fetch upstream',
- )
- update_upstream_revision("FETCH_HEAD")
- sh (
- script: "git -c user.name=TVM-Jenkins -c [email protected]
merge ${upstream_revision}",
- label: 'Merge to origin/main'
- )
-}
-
-def docker_init(image) {
- // Clear out all Docker images that aren't going to be used
- sh(
- script: """
- set -eux
- docker image ls --all
- IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}')
-
- echo -e "Found images:\\n\$IMAGES"
- echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs
docker rmi || test \$? = 123; }
-
- docker image ls --all
- """,
- label: 'Clean old Docker images',
- )
-
- if (image.contains("amazonaws.com")) {
- // If this string is in the image name it's from ECR and needs to be pulled
- // with the right credentials
- ecr_pull(image)
- } else {
- sh(
- script: """
- set -eux
- . ${jenkins_scripts_root}/retry.sh
- retry 5 docker pull ${image}
- """,
- label: 'Pull docker image',
- )
- }
-}
-
-def ecr_pull(full_name) {
- aws_account_id = sh(
- returnStdout: true,
- script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"',
- label: 'Get AWS ID'
- ).trim()
-
- try {
- withEnv([
- "AWS_ACCOUNT_ID=${aws_account_id}",
- 'AWS_DEFAULT_REGION=us-west-2',
- "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
- sh(
- script: '''
- set -eux
- aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO
- ''',
- label: 'Log in to ECR'
- )
- sh(
- script: """
- set -eux
- . ${jenkins_scripts_root}/retry.sh
- retry 5 docker pull ${full_name}
- """,
- label: 'Pull image from ECR'
- )
- }
- } finally {
- withEnv([
- "AWS_ACCOUNT_ID=${aws_account_id}",
- 'AWS_DEFAULT_REGION=us-west-2',
- "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
- sh(
- script: 'docker logout $AWS_ECR_REPO',
- label: 'Clean up login credentials'
- )
- }
- }
-}
-
-def should_skip_slow_tests(pr_number) {
- withCredentials([string(
- credentialsId: 'tvm-bot-jenkins-reader',
- variable: 'GITHUB_TOKEN',
- )]) {
- // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests
- result = sh (
- returnStatus: true,
- script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr
'${pr_number}'",
- label: 'Check if CI should run slow tests',
- )
- }
- return result == 0
-}
-
-def cancel_previous_build() {
- // cancel previous build if it is not on main.
- if (env.BRANCH_NAME != 'main') {
- def buildNumber = env.BUILD_NUMBER as int
- // Milestone API allows us to cancel previous build
- // with the same milestone number
- if (buildNumber > 1) milestone(buildNumber - 1)
- milestone(buildNumber)
- }
-}
-
-def is_last_build() {
- // check whether it is last build
- try {
- return currentBuild.number == currentBuild.rawBuild.project.getLastBuild().number
- } catch (Throwable ex) {
- echo 'Error during check is_last_build ' + ex.toString()
- return false
- }
-}
-
-def checkout_trusted_files() {
- // trust everything from branch builds
- if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
- return;
- }
-
- // trust peoople listed in CONTRIBUTING.md
- grep_code = sh(
- returnStatus: true,
- script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep
'@${env.CHANGE_AUTHOR}'",
- label: 'Check if change is from a contributor',
- )
-
- if (grep_code == 1) {
- // Any scripts that run on the bare host and not inside a Docker container
- // (especially those that access secrets) should be checked out here so
- // only trusted versions are used in CI
- sh(
- script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.",
- label: 'Check out trusted files',
- )
- }
-}
-
-def should_skip_ci(pr_number) {
- if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
- // never skip CI on build sourced from a branch
- return false
- }
- glob_skip_ci_code = sh (
- returnStatus: true,
- script: "./${jenkins_scripts_root}/git_skip_ci_globs.py",
- label: 'Check if CI should be skipped due to changed files',
- )
- if (glob_skip_ci_code == 0) {
- return true
- }
- withCredentials([string(
- credentialsId: 'tvm-bot-jenkins-reader',
- variable: 'GITHUB_TOKEN',
- )]) {
- // Exit code of 1 means run full CI (or the script had an error, so run
- // full CI just in case). Exit code of 0 means skip CI.
- git_skip_ci_code = sh (
- returnStatus: true,
- script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'",
- label: 'Check if CI should be skipped',
- )
- }
- return git_skip_ci_code == 0
-}
-
-def check_pr(pr_number) {
- if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
- // never skip CI on build sourced from a branch
- return false
- }
- withCredentials([string(
- credentialsId: 'tvm-bot-jenkins-reader',
- variable: 'GITHUB_TOKEN',
- )]) {
- sh (
- script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}",
- label: 'Check PR title and body',
- )
- }
-
-}
-
-def prepare(node_type) {
- stage('Prepare') {
- node(node_type) {
- ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
- init_git()
-
- check_pr(env.CHANGE_ID)
-
- if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
- sh(
- script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_hexagon ci_i386 ci_lint ci_wasm ",
- label: 'Decide whether to use tlcpack or tlcpackstaging for Docker
images',
- )
- // Pull image names from the results of should_rebuild_docker.py
- ci_arm = sh(
- script: "cat .docker-image-names/ci_arm",
- label: "Find docker image name for ci_arm",
- returnStdout: true,
- ).trim()
- ci_cpu = sh(
- script: "cat .docker-image-names/ci_cpu",
- label: "Find docker image name for ci_cpu",
- returnStdout: true,
- ).trim()
- ci_gpu = sh(
- script: "cat .docker-image-names/ci_gpu",
- label: "Find docker image name for ci_gpu",
- returnStdout: true,
- ).trim()
- ci_hexagon = sh(
- script: "cat .docker-image-names/ci_hexagon",
- label: "Find docker image name for ci_hexagon",
- returnStdout: true,
- ).trim()
- ci_i386 = sh(
- script: "cat .docker-image-names/ci_i386",
- label: "Find docker image name for ci_i386",
- returnStdout: true,
- ).trim()
- ci_lint = sh(
- script: "cat .docker-image-names/ci_lint",
- label: "Find docker image name for ci_lint",
- returnStdout: true,
- ).trim()
- ci_wasm = sh(
- script: "cat .docker-image-names/ci_wasm",
- label: "Find docker image name for ci_wasm",
- returnStdout: true,
- ).trim()
- }
-
- ci_arm = params.ci_arm_param ?: ci_arm
- ci_cpu = params.ci_cpu_param ?: ci_cpu
- ci_gpu = params.ci_gpu_param ?: ci_gpu
- ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
- ci_i386 = params.ci_i386_param ?: ci_i386
- ci_lint = params.ci_lint_param ?: ci_lint
- ci_wasm = params.ci_wasm_param ?: ci_wasm
-
- sh (script: """
- echo "Docker images being used in this build:"
- echo " ci_arm = ${ci_arm}"
- echo " ci_cpu = ${ci_cpu}"
- echo " ci_gpu = ${ci_gpu}"
- echo " ci_hexagon = ${ci_hexagon}"
- echo " ci_i386 = ${ci_i386}"
- echo " ci_lint = ${ci_lint}"
- echo " ci_wasm = ${ci_wasm}"
- """, label: 'Docker image names')
-
- is_docs_only_build = sh (
- returnStatus: true,
- script: "./${jenkins_scripts_root}/git_change_docs.sh",
- label: 'Check for docs only changes',
- )
- skip_ci = should_skip_ci(env.CHANGE_ID)
- skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID)
- rebuild_docker_images = sh (
- returnStatus: true,
- script: "./${jenkins_scripts_root}/git_change_docker.sh",
- label: 'Check for any docker changes',
- )
-
- if (skip_ci) {
- // Don't rebuild when skipping CI
- rebuild_docker_images = false
- }
- }
- }
- }
-}
-def ci_setup(image) {
- sh (
- script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh",
- label: 'Clean up old workspace',
- )
-}
-
-def python_unittest(image) {
- sh (
- script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh",
- label: 'Run Python unit tests',
- )
-}
-
-def make_cpp_tests(image, build_dir) {
- sh (
- script: """
- set -eux
- ${docker_run} ${image} python3 ./tests/scripts/task_build.py \
- --sccache-bucket tvm-sccache-prod \
- --sccache-region us-west-2 \
- --cmake-target cpptest \
- --build-dir ${build_dir}
- """,
- label: 'Make C++ tests',
- )
-}
-
-def cmake_build(image, path) {
- sh (
- script: "${docker_run} --env CI_NUM_EXECUTORS ${image}
./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod
--sccache-region us-west-2 --build-dir ${path}",
- label: 'Run cmake build',
- )
-}
-def cpp_unittest(image) {
- sh (
- script: "${docker_run} --env CI_NUM_EXECUTORS ${image}
./tests/scripts/task_cpp_unittest.sh",
- label: 'Run C++ tests',
- )
-}
-
-cancel_previous_build()
-
-try {
- prepare('CPU-SMALL-SPOT')
-} catch(Exception ex) {
- prepare('CPU-SMALL')
-}
-def run_build(node_type) {
- if (!skip_ci && is_docs_only_build != 1) {
- echo 'Begin running node_type ' + node_type
- node(node_type) {
- ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-hexagon") {
- init_git()
- docker_init(ci_hexagon)
- timeout(time: max_time, unit: 'MINUTES') {
-
- withEnv([
- 'PLATFORM=hexagon',
- ], {
- sh (
- script: "${docker_run} ${ci_hexagon}
./tests/scripts/task_config_build_hexagon.sh build",
- label: 'Create Hexagon cmake config',
- )
- cmake_build(ci_hexagon, 'build')
- make_cpp_tests(ci_hexagon, 'build')
- sh (
- script: "${docker_run} ${ci_hexagon}
./tests/scripts/task_build_hexagon_api.sh",
- label: 'Build Hexagon API',
- )
- sh(
- script: "./${jenkins_scripts_root}/s3.py --action upload --bucket
${s3_bucket} --prefix ${s3_prefix}/hexagon --items build/libtvm.so
build/libtvm_runtime.so build/lib/libtvm_ffi.so build/config.cmake
build/cpptest build/build.ninja build/CMakeFiles/rules.ninja
build/hexagon_api_output",
- label: 'Upload artifacts to S3',
- )
- })
- }
- }
- }
- echo 'End running node_type ' + node_type
- } else {
- Utils.markStageSkippedForConditional('BUILD: Hexagon')
- }
-}
-def build() {
- stage('Build') {
- try {
- run_build('CPU-SPOT')
- } catch (hudson.AbortException abortEx) {
- echo "Received normal AbortException, exit now. Details:" +
abortEx.toString()
- throw abortEx
- } catch (Throwable ex) {
- echo 'Exception during SPOT run ' + ex.toString()
- if (is_last_build()) {
- // retry if we are currently at last build
- // mark the current stage as success
- // and try again via on demand node
- echo 'Retry on-demand given it is last build'
- currentBuild.result = 'SUCCESS'
- run_build('CPU')
- } else {
- echo 'Exit since it is not last build'
- throw ex
- }
- }
- }
-}
-build()
-
-
-
-def test() {
- stage('Test') {
- environment {
- SKIP_SLOW_TESTS = "${skip_slow_tests}"
- }
- parallel(
- )
- }
-}
-test()
diff --git a/ci/jenkins/generated/i386_jenkinsfile.groovy b/ci/jenkins/generated/i386_jenkinsfile.groovy
deleted file mode 100644
index e54ec2c606..0000000000
--- a/ci/jenkins/generated/i386_jenkinsfile.groovy
+++ /dev/null
@@ -1,765 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-// -*- mode: groovy -*-
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-// Jenkins pipeline
-// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
-
-// Docker env used for testing
-// Different image may have different version tag
-// because some of them are more stable than anoter.
-//
-// Docker images are maintained by PMC, cached in dockerhub
-// and remains relatively stable over the time.
-// Flow for upgrading docker env(need commiter)
-//
-// - Send PR to upgrade build script in the repo
-// - Build the new docker image
-// - Tag the docker image with a new version and push to a binary cache.
-// - Update the version in the Jenkinsfile, send a PR
-// - Fix any issues wrt to the new image version in the PR
-// - Merge the PR and now we are in new version
-// - Tag the new version as the lates
-// - Periodically cleanup the old versions on local workers
-//
-
-// ============================= IMPORTANT NOTE =============================
-// This file is generated by 'jenkins/generate.py'. Do not edit this file directly!
-// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with
-// 'python3 jenkins/generate.py'
-// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
-// always rebased on main before merging:
-// Generated at 2025-08-24T16:41:22.332874
-
-import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
-// These are set at runtime from data in ci/jenkins/docker-images.yml, update
-// image tags in that file
-ci_lint = ''
-ci_gpu = ''
-ci_cpu = ''
-ci_minimal = ''
-ci_wasm = ''
-ci_i386 = ''
-ci_cortexm = ''
-ci_arm = ''
-ci_hexagon = ''
-ci_riscv = ''
-
-// Parameters to allow overriding (in Jenkins UI), the images
-// to be used by a given build. When provided, they take precedence
-// over default values above.
-properties([
- parameters([
- string(name: 'ci_arm_param', defaultValue: ''),
- string(name: 'ci_cpu_param', defaultValue: ''),
- string(name: 'ci_gpu_param', defaultValue: ''),
- string(name: 'ci_hexagon_param', defaultValue: ''),
- string(name: 'ci_i386_param', defaultValue: ''),
- string(name: 'ci_lint_param', defaultValue: ''),
- string(name: 'ci_wasm_param', defaultValue: ''),
- ])
-])
-
-// Placeholders for newly built Docker image names (if rebuild_docker_images
-// is used)
- built_ci_arm = null;
- built_ci_cpu = null;
- built_ci_gpu = null;
- built_ci_hexagon = null;
- built_ci_i386 = null;
- built_ci_lint = null;
- built_ci_wasm = null;
-
-// Global variable assigned during Sanity Check that holds the sha1 which should be
-// merged into the PR in all branches.
-upstream_revision = null
-
-// command to start a docker container
-docker_run = 'docker/bash.sh --env CI --env PLATFORM --env TVM_SHARD_INDEX --env TVM_NUM_SHARDS --env RUN_DISPLAY_URL --env PLATFORM --env SKIP_SLOW_TESTS --env TEST_STEP_NAME'
-docker_build = 'docker/build.sh'
-// timeout in minutes
-max_time = 180
-rebuild_docker_images = false
-
-s3_bucket = 'tvm-jenkins-artifacts-prod'
-s3_prefix = "tvm/${env.BRANCH_NAME}/${env.BUILD_NUMBER}"
-
-// Jenkins script root directory
-jenkins_scripts_root = "ci/scripts/jenkins"
-
-
-// General note: Jenkins has limits on the size of a method (or top level code)
-// that are pretty strict, so most usage of groovy methods in these templates
-// are purely to satisfy the JVM
-def per_exec_ws(folder) {
- return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder
-}
-
-// initialize source codes
-def init_git() {
- retry(5) {
- checkout scm
- }
-
- // Add more info about job node
- sh (
- script: './tests/scripts/task_show_node_info.sh',
- label: 'Show executor node info',
- )
-
- // Determine merge commit to use for all stages
- if (env.BRANCH_NAME == 'main') {
- // Only set upstream_revision to HEAD and skip merging to avoid a race with another commit merged to main.
- update_upstream_revision("HEAD")
- } else {
- // This is PR branch so merge with latest main.
- merge_with_main()
- }
-
- sh(
- script: """
- set -eux
- . ${jenkins_scripts_root}/retry.sh
- retry 3 timeout 5m git submodule update --init --recursive -f --jobs 0
- """,
- label: 'Update git submodules',
- )
- checkout_trusted_files()
-}
-
-def update_upstream_revision(git_ref) {
- if (upstream_revision == null) {
- upstream_revision = sh(
- script: "git log -1 ${git_ref} --format=\'%H\'",
- label: 'Determine upstream revision',
- returnStdout: true,
- ).trim()
- }
-}
-
-def merge_with_main() {
- sh (
- script: 'git fetch origin main',
- label: 'Fetch upstream',
- )
- update_upstream_revision("FETCH_HEAD")
- sh (
- script: "git -c user.name=TVM-Jenkins -c [email protected]
merge ${upstream_revision}",
- label: 'Merge to origin/main'
- )
-}
-
-def docker_init(image) {
- // Clear out all Docker images that aren't going to be used
- sh(
- script: """
- set -eux
- docker image ls --all
- IMAGES=\$(docker image ls --all --format '{{.Repository}}:{{.Tag}} {{.ID}}')
-
- echo -e "Found images:\\n\$IMAGES"
- echo "\$IMAGES" | { grep -vE '${image}' || test \$? = 1; } | { xargs
docker rmi || test \$? = 123; }
-
- docker image ls --all
- """,
- label: 'Clean old Docker images',
- )
-
- if (image.contains("amazonaws.com")) {
- // If this string is in the image name it's from ECR and needs to be pulled
- // with the right credentials
- ecr_pull(image)
- } else {
- sh(
- script: """
- set -eux
- . ${jenkins_scripts_root}/retry.sh
- retry 5 docker pull ${image}
- """,
- label: 'Pull docker image',
- )
- }
-}
-
-def ecr_pull(full_name) {
- aws_account_id = sh(
- returnStdout: true,
- script: 'aws sts get-caller-identity | grep Account | cut -f4 -d\\"',
- label: 'Get AWS ID'
- ).trim()
-
- try {
- withEnv([
- "AWS_ACCOUNT_ID=${aws_account_id}",
- 'AWS_DEFAULT_REGION=us-west-2',
- "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
- sh(
- script: '''
- set -eux
- aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ECR_REPO
- ''',
- label: 'Log in to ECR'
- )
- sh(
- script: """
- set -eux
- . ${jenkins_scripts_root}/retry.sh
- retry 5 docker pull ${full_name}
- """,
- label: 'Pull image from ECR'
- )
- }
- } finally {
- withEnv([
- "AWS_ACCOUNT_ID=${aws_account_id}",
- 'AWS_DEFAULT_REGION=us-west-2',
- "AWS_ECR_REPO=${aws_account_id}.dkr.ecr.us-west-2.amazonaws.com"]) {
- sh(
- script: 'docker logout $AWS_ECR_REPO',
- label: 'Clean up login credentials'
- )
- }
- }
-}
-
-def should_skip_slow_tests(pr_number) {
- withCredentials([string(
- credentialsId: 'tvm-bot-jenkins-reader',
- variable: 'GITHUB_TOKEN',
- )]) {
- // Exit code of 1 means run slow tests, exit code of 0 means skip slow tests
- result = sh (
- returnStatus: true,
- script: "./${jenkins_scripts_root}/should_run_slow_tests.py --pr
'${pr_number}'",
- label: 'Check if CI should run slow tests',
- )
- }
- return result == 0
-}
-
-def cancel_previous_build() {
- // cancel previous build if it is not on main.
- if (env.BRANCH_NAME != 'main') {
- def buildNumber = env.BUILD_NUMBER as int
- // Milestone API allows us to cancel previous build
- // with the same milestone number
- if (buildNumber > 1) milestone(buildNumber - 1)
- milestone(buildNumber)
- }
-}
-
-def is_last_build() {
- // check whether it is last build
- try {
- return currentBuild.number == currentBuild.rawBuild.project.getLastBuild().number
- } catch (Throwable ex) {
- echo 'Error during check is_last_build ' + ex.toString()
- return false
- }
-}
-
-def checkout_trusted_files() {
- // trust everything from branch builds
- if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
- return;
- }
-
- // trust peoople listed in CONTRIBUTING.md
- grep_code = sh(
- returnStatus: true,
- script: "git show '${upstream_revision}:CONTRIBUTORS.md' | grep
'@${env.CHANGE_AUTHOR}'",
- label: 'Check if change is from a contributor',
- )
-
- if (grep_code == 1) {
- // Any scripts that run on the bare host and not inside a Docker container
- // (especially those that access secrets) should be checked out here so
- // only trusted versions are used in CI
- sh(
- script: "git checkout ${upstream_revision} ${jenkins_scripts_root}/.",
- label: 'Check out trusted files',
- )
- }
-}
-
-def should_skip_ci(pr_number) {
- if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
- // never skip CI on build sourced from a branch
- return false
- }
- glob_skip_ci_code = sh (
- returnStatus: true,
- script: "./${jenkins_scripts_root}/git_skip_ci_globs.py",
- label: 'Check if CI should be skipped due to changed files',
- )
- if (glob_skip_ci_code == 0) {
- return true
- }
- withCredentials([string(
- credentialsId: 'tvm-bot-jenkins-reader',
- variable: 'GITHUB_TOKEN',
- )]) {
- // Exit code of 1 means run full CI (or the script had an error, so run
- // full CI just in case). Exit code of 0 means skip CI.
- git_skip_ci_code = sh (
- returnStatus: true,
- script: "./${jenkins_scripts_root}/git_skip_ci.py --pr '${pr_number}'",
- label: 'Check if CI should be skipped',
- )
- }
- return git_skip_ci_code == 0
-}
-
-def check_pr(pr_number) {
- if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
- // never skip CI on build sourced from a branch
- return false
- }
- withCredentials([string(
- credentialsId: 'tvm-bot-jenkins-reader',
- variable: 'GITHUB_TOKEN',
- )]) {
- sh (
- script: "python3 ${jenkins_scripts_root}/check_pr.py --pr ${pr_number}",
- label: 'Check PR title and body',
- )
- }
-
-}
-
-def prepare(node_type) {
- stage('Prepare') {
- node(node_type) {
- ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/prepare") {
- init_git()
-
- check_pr(env.CHANGE_ID)
-
- if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
- sh(
- script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_hexagon ci_i386 ci_lint ci_wasm ",
- label: 'Decide whether to use tlcpack or tlcpackstaging for Docker
images',
- )
- // Pull image names from the results of should_rebuild_docker.py
- ci_arm = sh(
- script: "cat .docker-image-names/ci_arm",
- label: "Find docker image name for ci_arm",
- returnStdout: true,
- ).trim()
- ci_cpu = sh(
- script: "cat .docker-image-names/ci_cpu",
- label: "Find docker image name for ci_cpu",
- returnStdout: true,
- ).trim()
- ci_gpu = sh(
- script: "cat .docker-image-names/ci_gpu",
- label: "Find docker image name for ci_gpu",
- returnStdout: true,
- ).trim()
- ci_hexagon = sh(
- script: "cat .docker-image-names/ci_hexagon",
- label: "Find docker image name for ci_hexagon",
- returnStdout: true,
- ).trim()
- ci_i386 = sh(
- script: "cat .docker-image-names/ci_i386",
- label: "Find docker image name for ci_i386",
- returnStdout: true,
- ).trim()
- ci_lint = sh(
- script: "cat .docker-image-names/ci_lint",
- label: "Find docker image name for ci_lint",
- returnStdout: true,
- ).trim()
- ci_wasm = sh(
- script: "cat .docker-image-names/ci_wasm",
- label: "Find docker image name for ci_wasm",
- returnStdout: true,
- ).trim()
- }
-
- ci_arm = params.ci_arm_param ?: ci_arm
- ci_cpu = params.ci_cpu_param ?: ci_cpu
- ci_gpu = params.ci_gpu_param ?: ci_gpu
- ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
- ci_i386 = params.ci_i386_param ?: ci_i386
- ci_lint = params.ci_lint_param ?: ci_lint
- ci_wasm = params.ci_wasm_param ?: ci_wasm
-
- sh (script: """
- echo "Docker images being used in this build:"
- echo " ci_arm = ${ci_arm}"
- echo " ci_cpu = ${ci_cpu}"
- echo " ci_gpu = ${ci_gpu}"
- echo " ci_hexagon = ${ci_hexagon}"
- echo " ci_i386 = ${ci_i386}"
- echo " ci_lint = ${ci_lint}"
- echo " ci_wasm = ${ci_wasm}"
- """, label: 'Docker image names')
-
- is_docs_only_build = sh (
- returnStatus: true,
- script: "./${jenkins_scripts_root}/git_change_docs.sh",
- label: 'Check for docs only changes',
- )
- skip_ci = should_skip_ci(env.CHANGE_ID)
- skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID)
- rebuild_docker_images = sh (
- returnStatus: true,
- script: "./${jenkins_scripts_root}/git_change_docker.sh",
- label: 'Check for any docker changes',
- )
-
- if (skip_ci) {
- // Don't rebuild when skipping CI
- rebuild_docker_images = false
- }
- }
- }
- }
-}
-def ci_setup(image) {
- sh (
- script: "${docker_run} ${image} ./tests/scripts/task_clear_pytest.sh",
- label: 'Clean up old workspace',
- )
-}
-
-def python_unittest(image) {
- sh (
- script: "${docker_run} ${image} ./tests/scripts/task_python_unittest.sh",
- label: 'Run Python unit tests',
- )
-}
-
-def make_cpp_tests(image, build_dir) {
- sh (
- script: """
- set -eux
- ${docker_run} ${image} python3 ./tests/scripts/task_build.py \
- --sccache-bucket tvm-sccache-prod \
- --sccache-region us-west-2 \
- --cmake-target cpptest \
- --build-dir ${build_dir}
- """,
- label: 'Make C++ tests',
- )
-}
-
-def cmake_build(image, path) {
- sh (
- script: "${docker_run} --env CI_NUM_EXECUTORS ${image}
./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod
--sccache-region us-west-2 --build-dir ${path}",
- label: 'Run cmake build',
- )
-}
-def cpp_unittest(image) {
- sh (
- script: "${docker_run} --env CI_NUM_EXECUTORS ${image}
./tests/scripts/task_cpp_unittest.sh",
- label: 'Run C++ tests',
- )
-}
-
-cancel_previous_build()
-
-try {
- prepare('CPU-SMALL-SPOT')
-} catch(Exception ex) {
- prepare('CPU-SMALL')
-}
-def run_build(node_type) {
- if (!skip_ci && is_docs_only_build != 1) {
- echo 'Begin running node_type ' + node_type
- node(node_type) {
- ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-i386") {
- init_git()
- docker_init(ci_i386)
- timeout(time: max_time, unit: 'MINUTES') {
-
- withEnv([
- 'PLATFORM=i386',
- ], {
- sh (
- script: "${docker_run} ${ci_i386}
./tests/scripts/task_config_build_i386.sh build",
- label: 'Create i386 cmake config',
- )
- cmake_build(ci_i386, 'build')
- make_cpp_tests(ci_i386, 'build')
- sh(
- script: "./${jenkins_scripts_root}/s3.py --action upload --bucket
${s3_bucket} --prefix ${s3_prefix}/i386 --items build/libtvm.so
build/libtvm_runtime.so build/lib/libtvm_ffi.so build/config.cmake
build/cpptest build/build.ninja build/CMakeFiles/rules.ninja",
- label: 'Upload artifacts to S3',
- )
- })
- }
- }
- }
- echo 'End running node_type ' + node_type
- } else {
- Utils.markStageSkippedForConditional('BUILD: i386')
- }
-}
-def build() {
- stage('Build') {
- try {
- run_build('CPU-SPOT')
- } catch (hudson.AbortException abortEx) {
- echo "Received normal AbortException, exit now. Details:" +
abortEx.toString()
- throw abortEx
- } catch (Throwable ex) {
- echo 'Exception during SPOT run ' + ex.toString()
- if (is_last_build()) {
- // retry if we are currently at last build
- // mark the current stage as success
- // and try again via on demand node
- echo 'Retry on-demand given it is last build'
- currentBuild.result = 'SUCCESS'
- run_build('CPU')
- } else {
- echo 'Exit since it is not last build'
- throw ex
- }
- }
- }
-}
-build()
-
-
-
-
-def shard_run_python_i386_1_of_3(node_type) {
- echo 'Begin running on node_type ' + node_type
- if (!skip_ci && is_docs_only_build != 1) {
- node(node_type) {
- ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") {
- // NOTE: if exception happens, it will be caught outside
- init_git()
- docker_init(ci_i386)
- timeout(time: max_time, unit: 'MINUTES') {
- withEnv([
- 'PLATFORM=i386',
- 'TEST_STEP_NAME=python: i386',
- 'TVM_NUM_SHARDS=3',
- 'TVM_SHARD_INDEX=0',
- "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
- sh(
- script: "./${jenkins_scripts_root}/s3.py --action download
--bucket ${s3_bucket} --prefix ${s3_prefix}/i386",
- label: 'Download artifacts from S3',
- )
-
- ci_setup(ci_i386)
- cpp_unittest(ci_i386)
- python_unittest(ci_i386)
- sh (
- script: "${docker_run} ${ci_i386}
./tests/scripts/task_python_integration_i386only.sh",
- label: 'Run i386 integration tests',
- )
- })
- }
- // only run upload if things are successful
- try {
- sh(
- script: "./${jenkins_scripts_root}/s3.py --action upload --bucket
${s3_bucket} --prefix ${s3_prefix}/pytest-results/python_i386 --items
build/pytest-results",
- label: 'Upload JUnits to S3',
- )
-
- junit 'build/pytest-results/*.xml'
- } catch (Exception e) {
- echo 'Exception during JUnit upload: ' + e.toString()
- }
- }
- }
- echo 'End running on node_type ' + node_type
- } else {
- Utils.markStageSkippedForConditional('python: i386 1 of 3')
- }
-}
-
-def shard_run_python_i386_2_of_3(node_type) {
- echo 'Begin running on node_type ' + node_type
- if (!skip_ci && is_docs_only_build != 1) {
- node(node_type) {
- ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") {
- // NOTE: if exception happens, it will be caught outside
- init_git()
- docker_init(ci_i386)
- timeout(time: max_time, unit: 'MINUTES') {
- withEnv([
- 'PLATFORM=i386',
- 'TEST_STEP_NAME=python: i386',
- 'TVM_NUM_SHARDS=3',
- 'TVM_SHARD_INDEX=1',
- "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
- sh(
- script: "./${jenkins_scripts_root}/s3.py --action download
--bucket ${s3_bucket} --prefix ${s3_prefix}/i386",
- label: 'Download artifacts from S3',
- )
-
- ci_setup(ci_i386)
- python_unittest(ci_i386)
- sh (
- script: "${docker_run} ${ci_i386}
./tests/scripts/task_python_integration_i386only.sh",
- label: 'Run i386 integration tests',
- )
- })
- }
- // only run upload if things are successful
- try {
- sh(
- script: "./${jenkins_scripts_root}/s3.py --action upload --bucket
${s3_bucket} --prefix ${s3_prefix}/pytest-results/python_i386 --items
build/pytest-results",
- label: 'Upload JUnits to S3',
- )
-
- junit 'build/pytest-results/*.xml'
- } catch (Exception e) {
- echo 'Exception during JUnit upload: ' + e.toString()
- }
- }
- }
- echo 'End running on node_type ' + node_type
- } else {
- Utils.markStageSkippedForConditional('python: i386 2 of 3')
- }
-}
-
-def shard_run_python_i386_3_of_3(node_type) {
- echo 'Begin running on node_type ' + node_type
- if (!skip_ci && is_docs_only_build != 1) {
- node(node_type) {
- ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/integration-python-i386") {
- // NOTE: if exception happens, it will be caught outside
- init_git()
- docker_init(ci_i386)
- timeout(time: max_time, unit: 'MINUTES') {
- withEnv([
- 'PLATFORM=i386',
- 'TEST_STEP_NAME=python: i386',
- 'TVM_NUM_SHARDS=3',
- 'TVM_SHARD_INDEX=2',
- "SKIP_SLOW_TESTS=${skip_slow_tests}"], {
- sh(
- script: "./${jenkins_scripts_root}/s3.py --action download
--bucket ${s3_bucket} --prefix ${s3_prefix}/i386",
- label: 'Download artifacts from S3',
- )
-
- ci_setup(ci_i386)
- python_unittest(ci_i386)
- sh (
- script: "${docker_run} ${ci_i386}
./tests/scripts/task_python_integration_i386only.sh",
- label: 'Run i386 integration tests',
- )
- })
- }
- // only run upload if things are successful
- try {
- sh(
- script: "./${jenkins_scripts_root}/s3.py --action upload --bucket
${s3_bucket} --prefix ${s3_prefix}/pytest-results/python_i386 --items
build/pytest-results",
- label: 'Upload JUnits to S3',
- )
-
- junit 'build/pytest-results/*.xml'
- } catch (Exception e) {
- echo 'Exception during JUnit upload: ' + e.toString()
- }
- }
- }
- echo 'End running on node_type ' + node_type
- } else {
- Utils.markStageSkippedForConditional('python: i386 3 of 3')
- }
-}
-
-
-def test() {
- stage('Test') {
- environment {
- SKIP_SLOW_TESTS = "${skip_slow_tests}"
- }
- parallel(
- 'python: i386 1 of 3': {
- try {
- shard_run_python_i386_1_of_3('CPU-SMALL-SPOT')
- } catch (Throwable ex) {
- echo 'Exception during SPOT run ' + ex.toString()
- if (is_last_build()) {
- // retry if at last build
- // mark the current stage as success
- // and try again via on demand node
- echo 'Retry on-demand given it is last build'
- currentBuild.result = 'SUCCESS'
- shard_run_python_i386_1_of_3('CPU-SMALL')
- } else {
- echo 'Exit since it is not last build'
- throw ex
- }
- }
- },
- 'python: i386 2 of 3': {
- try {
- shard_run_python_i386_2_of_3('CPU-SMALL-SPOT')
- } catch (Throwable ex) {
- echo 'Exception during SPOT run ' + ex.toString()
- if (is_last_build()) {
- // retry if at last build
- // mark the current stage as success
- // and try again via on demand node
- echo 'Retry on-demand given it is last build'
- currentBuild.result = 'SUCCESS'
- shard_run_python_i386_2_of_3('CPU-SMALL')
- } else {
- echo 'Exit since it is not last build'
- throw ex
- }
- }
- },
- 'python: i386 3 of 3': {
- try {
- shard_run_python_i386_3_of_3('CPU-SMALL-SPOT')
- } catch (Throwable ex) {
- echo 'Exception during SPOT run ' + ex.toString()
- if (is_last_build()) {
- // retry if at last build
- // mark the current stage as success
- // and try again via on demand node
- echo 'Retry on-demand given it is last build'
- currentBuild.result = 'SUCCESS'
- shard_run_python_i386_3_of_3('CPU-SMALL')
- } else {
- echo 'Exit since it is not last build'
- throw ex
- }
- }
- },
- )
- }
-}
-test()
diff --git a/ci/jenkins/generated/lint_jenkinsfile.groovy b/ci/jenkins/generated/lint_jenkinsfile.groovy
index c347ddc408..34cd7512bf 100644
--- a/ci/jenkins/generated/lint_jenkinsfile.groovy
+++ b/ci/jenkins/generated/lint_jenkinsfile.groovy
@@ -60,7 +60,7 @@
// 'python3 jenkins/generate.py'
// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
// always rebased on main before merging:
-// Generated at 2025-06-03T18:16:35.827692
+// Generated at 2026-02-09T16:32:44.083887
import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
// These are set at runtime from data in ci/jenkins/docker-images.yml, update
@@ -70,10 +70,8 @@ ci_gpu = ''
ci_cpu = ''
ci_minimal = ''
ci_wasm = ''
-ci_i386 = ''
ci_cortexm = ''
ci_arm = ''
-ci_hexagon = ''
ci_riscv = ''
// Parameters to allow overriding (in Jenkins UI), the images
@@ -84,8 +82,6 @@ properties([
string(name: 'ci_arm_param', defaultValue: ''),
string(name: 'ci_cpu_param', defaultValue: ''),
string(name: 'ci_gpu_param', defaultValue: ''),
- string(name: 'ci_hexagon_param', defaultValue: ''),
- string(name: 'ci_i386_param', defaultValue: ''),
string(name: 'ci_lint_param', defaultValue: ''),
string(name: 'ci_wasm_param', defaultValue: ''),
])
@@ -96,8 +92,6 @@ properties([
built_ci_arm = null;
built_ci_cpu = null;
built_ci_gpu = null;
- built_ci_hexagon = null;
- built_ci_i386 = null;
built_ci_lint = null;
built_ci_wasm = null;
@@ -368,7 +362,7 @@ def prepare(node_type) {
if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
sh(
- script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_hexagon ci_i386 ci_lint ci_wasm ",
+ script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_lint ci_wasm ",
label: 'Decide whether to use tlcpack or tlcpackstaging for Docker
images',
)
// Pull image names from the results of should_rebuild_docker.py
@@ -387,16 +381,6 @@ def prepare(node_type) {
label: "Find docker image name for ci_gpu",
returnStdout: true,
).trim()
- ci_hexagon = sh(
- script: "cat .docker-image-names/ci_hexagon",
- label: "Find docker image name for ci_hexagon",
- returnStdout: true,
- ).trim()
- ci_i386 = sh(
- script: "cat .docker-image-names/ci_i386",
- label: "Find docker image name for ci_i386",
- returnStdout: true,
- ).trim()
ci_lint = sh(
script: "cat .docker-image-names/ci_lint",
label: "Find docker image name for ci_lint",
@@ -412,8 +396,6 @@ def prepare(node_type) {
ci_arm = params.ci_arm_param ?: ci_arm
ci_cpu = params.ci_cpu_param ?: ci_cpu
ci_gpu = params.ci_gpu_param ?: ci_gpu
- ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
- ci_i386 = params.ci_i386_param ?: ci_i386
ci_lint = params.ci_lint_param ?: ci_lint
ci_wasm = params.ci_wasm_param ?: ci_wasm
@@ -422,8 +404,6 @@ def prepare(node_type) {
echo " ci_arm = ${ci_arm}"
echo " ci_cpu = ${ci_cpu}"
echo " ci_gpu = ${ci_gpu}"
- echo " ci_hexagon = ${ci_hexagon}"
- echo " ci_i386 = ${ci_i386}"
echo " ci_lint = ${ci_lint}"
echo " ci_wasm = ${ci_wasm}"
""", label: 'Docker image names')
diff --git a/ci/jenkins/generated/wasm_jenkinsfile.groovy
b/ci/jenkins/generated/wasm_jenkinsfile.groovy
index 4a6ccac25f..fe3502ee43 100644
--- a/ci/jenkins/generated/wasm_jenkinsfile.groovy
+++ b/ci/jenkins/generated/wasm_jenkinsfile.groovy
@@ -60,7 +60,7 @@
// 'python3 jenkins/generate.py'
// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
// always rebased on main before merging:
-// Generated at 2025-08-24T11:52:44.735820
+// Generated at 2026-02-09T16:32:44.060039
import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
// These are set at runtime from data in ci/jenkins/docker-images.yml, update
@@ -70,10 +70,8 @@ ci_gpu = ''
ci_cpu = ''
ci_minimal = ''
ci_wasm = ''
-ci_i386 = ''
ci_cortexm = ''
ci_arm = ''
-ci_hexagon = ''
ci_riscv = ''
// Parameters to allow overriding (in Jenkins UI), the images
@@ -84,8 +82,6 @@ properties([
string(name: 'ci_arm_param', defaultValue: ''),
string(name: 'ci_cpu_param', defaultValue: ''),
string(name: 'ci_gpu_param', defaultValue: ''),
- string(name: 'ci_hexagon_param', defaultValue: ''),
- string(name: 'ci_i386_param', defaultValue: ''),
string(name: 'ci_lint_param', defaultValue: ''),
string(name: 'ci_wasm_param', defaultValue: ''),
])
@@ -96,8 +92,6 @@ properties([
built_ci_arm = null;
built_ci_cpu = null;
built_ci_gpu = null;
- built_ci_hexagon = null;
- built_ci_i386 = null;
built_ci_lint = null;
built_ci_wasm = null;
@@ -368,7 +362,7 @@ def prepare(node_type) {
if (env.DETERMINE_DOCKER_IMAGES == 'yes') {
sh(
- script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_hexagon ci_i386 ci_lint ci_wasm ",
+ script: "./${jenkins_scripts_root}/determine_docker_images.py
ci_arm ci_cpu ci_gpu ci_lint ci_wasm ",
label: 'Decide whether to use tlcpack or tlcpackstaging for Docker
images',
)
// Pull image names from the results of should_rebuild_docker.py
@@ -387,16 +381,6 @@ def prepare(node_type) {
label: "Find docker image name for ci_gpu",
returnStdout: true,
).trim()
- ci_hexagon = sh(
- script: "cat .docker-image-names/ci_hexagon",
- label: "Find docker image name for ci_hexagon",
- returnStdout: true,
- ).trim()
- ci_i386 = sh(
- script: "cat .docker-image-names/ci_i386",
- label: "Find docker image name for ci_i386",
- returnStdout: true,
- ).trim()
ci_lint = sh(
script: "cat .docker-image-names/ci_lint",
label: "Find docker image name for ci_lint",
@@ -412,8 +396,6 @@ def prepare(node_type) {
ci_arm = params.ci_arm_param ?: ci_arm
ci_cpu = params.ci_cpu_param ?: ci_cpu
ci_gpu = params.ci_gpu_param ?: ci_gpu
- ci_hexagon = params.ci_hexagon_param ?: ci_hexagon
- ci_i386 = params.ci_i386_param ?: ci_i386
ci_lint = params.ci_lint_param ?: ci_lint
ci_wasm = params.ci_wasm_param ?: ci_wasm
@@ -422,8 +404,6 @@ def prepare(node_type) {
echo " ci_arm = ${ci_arm}"
echo " ci_cpu = ${ci_cpu}"
echo " ci_gpu = ${ci_gpu}"
- echo " ci_hexagon = ${ci_hexagon}"
- echo " ci_i386 = ${ci_i386}"
echo " ci_lint = ${ci_lint}"
echo " ci_wasm = ${ci_wasm}"
""", label: 'Docker image names')
diff --git a/ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2
b/ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2
deleted file mode 100644
index b4177b3329..0000000000
--- a/ci/jenkins/templates/hexagon_jenkinsfile.groovy.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-{% include "utils/base.groovy.j2" with context %}
-{% import 'utils/macros.j2' as m with context -%}
-
-{% call m.invoke_build(
- name='BUILD: Hexagon',
- node='CPU',
- condition='!skip_ci && is_docs_only_build != 1',
- ws='tvm/build-hexagon',
- docker_image='ci_hexagon',
- platform="hexagon",
-) %}
- sh (
- script: "${docker_run} ${ci_hexagon}
./tests/scripts/task_config_build_hexagon.sh build",
- label: 'Create Hexagon cmake config',
- )
- cmake_build(ci_hexagon, 'build')
- make_cpp_tests(ci_hexagon, 'build')
- sh (
- script: "${docker_run} ${ci_hexagon}
./tests/scripts/task_build_hexagon_api.sh",
- label: 'Build Hexagon API',
- )
- {{ m.upload_artifacts(tag='hexagon', filenames=tvm_lib + cpptest +
hexagon_api) }}
-{% endcall %}
-
-
-{% set test_method_names = [] %}
-
-{{ m.invoke_tests(node="CPU-SMALL", test_method_names=test_method_names) -}}
diff --git a/ci/jenkins/templates/i386_jenkinsfile.groovy.j2
b/ci/jenkins/templates/i386_jenkinsfile.groovy.j2
deleted file mode 100644
index 2c9aa3234f..0000000000
--- a/ci/jenkins/templates/i386_jenkinsfile.groovy.j2
+++ /dev/null
@@ -1,60 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-{% include "utils/base.groovy.j2" with context %}
-{% import 'utils/macros.j2' as m with context -%}
-
-{% call m.invoke_build(
- name='BUILD: i386',
- node='CPU',
- condition='!skip_ci && is_docs_only_build != 1',
- ws='tvm/build-i386',
- docker_image='ci_i386',
- platform="i386",
-) %}
- sh (
- script: "${docker_run} ${ci_i386}
./tests/scripts/task_config_build_i386.sh build",
- label: 'Create i386 cmake config',
- )
- cmake_build(ci_i386, 'build')
- make_cpp_tests(ci_i386, 'build')
- {{ m.upload_artifacts(tag='i386', filenames=tvm_lib + cpptest) }}
-{% endcall %}
-
-
-{% set test_method_names = [] %}
-
-{% call(shard_index, num_shards) m.sharded_test_step(
- name="python: i386",
- num_shards=3,
- ws="tvm/integration-python-i386",
- platform="i386",
- docker_image="ci_i386",
- test_method_names=test_method_names,
-) %}
- {{ m.download_artifacts(tag='i386') }}
- ci_setup(ci_i386)
- {% if shard_index == 1 %}
- cpp_unittest(ci_i386)
- {% endif %}
- python_unittest(ci_i386)
- sh (
- script: "${docker_run} ${ci_i386}
./tests/scripts/task_python_integration_i386only.sh",
- label: 'Run i386 integration tests',
- )
-{% endcall %}
-
-{{ m.invoke_tests(node="CPU-SMALL", test_method_names=test_method_names) -}}
diff --git a/ci/jenkins/templates/utils/base.groovy.j2
b/ci/jenkins/templates/utils/base.groovy.j2
index 68395d05a9..38c56bc9fb 100644
--- a/ci/jenkins/templates/utils/base.groovy.j2
+++ b/ci/jenkins/templates/utils/base.groovy.j2
@@ -57,10 +57,8 @@ ci_gpu = ''
ci_cpu = ''
ci_minimal = ''
ci_wasm = ''
-ci_i386 = ''
ci_cortexm = ''
ci_arm = ''
-ci_hexagon = ''
ci_riscv = ''
// Parameters to allow overriding (in Jenkins UI), the images
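The Groovy files under ci/jenkins/generated/ are rendered from these Jinja2
templates, so the earlier hunks in this patch are the downstream effect of
deleting the hexagon and i386 templates and trimming the shared
base.groovy.j2; the generated headers point at 'python3 jenkins/generate.py'
as the regeneration step. A toy sketch of the rendering relationship, not the
project's actual generator (the template string and image list here are
illustrative only):

    from jinja2 import Template

    template = Template("{% for image in images %}{{ image }} = ''\n{% endfor %}")
    # With ci_i386 and ci_hexagon removed from the input data, the rendered
    # globals block simply no longer mentions them.
    print(template.render(images=["ci_gpu", "ci_cpu", "ci_lint", "ci_wasm"]))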
diff --git a/ci/scripts/github/github_tvmbot.py
b/ci/scripts/github/github_tvmbot.py
index 47f1e699a7..b7ba6e0495 100755
--- a/ci/scripts/github/github_tvmbot.py
+++ b/ci/scripts/github/github_tvmbot.py
@@ -535,8 +535,6 @@ class PR:
"tvm-cpu",
"tvm-docker",
"tvm-gpu",
- "tvm-hexagon",
- "tvm-i386",
"tvm-lint",
"tvm-wasm",
"tvm-unity",
diff --git a/ci/scripts/github/update_branch.py
b/ci/scripts/github/update_branch.py
index b3fa014137..64d80eb9d5 100755
--- a/ci/scripts/github/update_branch.py
+++ b/ci/scripts/github/update_branch.py
@@ -90,12 +90,10 @@ def commits_query(user: str, repo: str, cursor: str = None):
EXPECTED_CI_JOBS = [
"cross-isa-minimal/branch",
"gpu/branch",
- "hexagon/branch",
"arm/branch",
"cortexm/branch",
"cpu/branch",
"docker/branch",
- "i386/branch",
"lint/branch",
"minimal/branch",
"riscv/branch",
diff --git a/tests/python/ci/test_ci.py b/tests/python/ci/test_ci.py
index ca749665c9..bb5a98a299 100644
--- a/tests/python/ci/test_ci.py
+++ b/tests/python/ci/test_ci.py
@@ -1236,8 +1236,6 @@ def test_open_docker_update_pr(
"ci_cortexm",
"ci_cpu",
"ci_gpu",
- "ci_hexagon",
- "ci_i386",
"ci_lint",
"ci_minimal",
"ci_riscv",
diff --git a/tests/python/codegen/test_target_codegen_cross_llvm.py
b/tests/python/codegen/test_target_codegen_cross_llvm.py
index 50220993a2..224924ff72 100644
--- a/tests/python/codegen/test_target_codegen_cross_llvm.py
+++ b/tests/python/codegen/test_target_codegen_cross_llvm.py
@@ -55,14 +55,6 @@ def test_llvm_add_pipeline():
endian = "<" if endian == 1 else ">"
assert struct.unpack(endian + "h", arr[0x12:0x14])[0] == e_machine
- def build_i386():
- temp = utils.tempdir()
- target = "llvm -mtriple=i386-pc-linux-gnu"
- f = tvm.tir.build(AddModule, target=target)
- path = temp.relpath("myadd.o")
- f.write_to_file(path)
- verify_elf(path, 0x03)
-
def build_arm():
target = "llvm -mtriple=armv7-none-linux-gnueabihf"
if not tvm.runtime.enabled(target):
@@ -97,7 +89,6 @@ def test_llvm_add_pipeline():
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
print("Verification finish on remote..")
- build_i386()
build_arm()
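The removed build_i386() variant cross-compiled the module for
i386-pc-linux-gnu and then used the verify_elf() helper above to assert that
the object file's e_machine field was 0x03 (EM_386); only the ARM variant
remains. A standalone sketch of that header check, with ELF constants from the
spec and a placeholder file path:

    import struct

    def elf_machine(path: str) -> int:
        # Bytes 0x12-0x13 of an ELF header hold e_machine;
        # e_ident[5] (EI_DATA) == 1 means little-endian.
        with open(path, "rb") as f:
            header = f.read(0x14)
        endian = "<" if header[5] == 1 else ">"
        return struct.unpack(endian + "H", header[0x12:0x14])[0]

    # 0x03 = EM_386, 0x28 = EM_ARM, 0x3E = EM_X86_64.
    assert elf_machine("myadd.o") == 0x28  # object built with -mtriple=armv7-...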
diff --git a/tests/python/codegen/test_target_codegen_x86.py
b/tests/python/codegen/test_target_codegen_x86.py
index cb3856e61c..3d68155c9f 100644
--- a/tests/python/codegen/test_target_codegen_x86.py
+++ b/tests/python/codegen/test_target_codegen_x86.py
@@ -25,8 +25,8 @@ from tvm.script import tir as T, ir as I
llvm_version = tvm.target.codegen.llvm_version_major()
machine = platform.machine()
-if machine not in ["i386", "x86_64", "AMD64", "amd64"]:
- pytest.skip(f"Requires x86_64/i386, but machine is {machine}",
allow_module_level=True)
+if machine not in ["x86_64", "AMD64", "amd64"]:
+ pytest.skip(f"Requires x86_64, but machine is {machine}",
allow_module_level=True)
@tvm.testing.requires_llvm
diff --git a/tests/scripts/ci.py b/tests/scripts/ci.py
index 96f0a5f7ed..676c46b933 100755
--- a/tests/scripts/ci.py
+++ b/tests/scripts/ci.py
@@ -171,10 +171,8 @@ def docker(
"ci_gpu",
"ci_cpu",
# "ci_wasm",
- # "ci_i386",
"ci_cortexm",
"ci_arm",
- "ci_hexagon",
"ci_riscv",
"ci_adreno",
}
@@ -631,20 +629,6 @@ generated = [
),
},
),
- generate_command(
- name="i386",
- help="Run i386 build and test(s)",
- options={
- "cpp": CPP_UNITTEST,
- "integration": (
- "run integration tests",
- [
- "./tests/scripts/task_python_unittest.sh",
- "./tests/scripts/task_python_integration_i386only.sh",
- ],
- ),
- },
- ),
generate_command(
name="wasm",
help="Run WASM build and test(s)",
diff --git a/tests/scripts/task_build.py b/tests/scripts/task_build.py
index 2f9f91df8a..4fe776f509 100755
--- a/tests/scripts/task_build.py
+++ b/tests/scripts/task_build.py
@@ -85,13 +85,10 @@ if __name__ == "__main__":
available_cpus = nproc // executors
num_cpus = max(available_cpus, 1)
- if build_platform == "i386":
- sh.run("cmake ..", cwd=build_dir)
+ if args.debug:
+ sh.run("cmake -GNinja -DCMAKE_BUILD_TYPE=Debug ..", cwd=build_dir)
else:
- if args.debug:
- sh.run("cmake -GNinja -DCMAKE_BUILD_TYPE=Debug ..", cwd=build_dir)
- else:
- sh.run("cmake -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo ..",
cwd=build_dir)
+ sh.run("cmake -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo ..",
cwd=build_dir)
target = ""
if args.cmake_target:
@@ -102,13 +99,7 @@ if __name__ == "__main__":
if verbose:
ninja_args.append("-v")
- if build_platform == "i386":
- if args.cmake_target:
- sh.run(f"make {args.cmake_target} -j{num_cpus}", cwd=build_dir)
- else:
- sh.run(f"make -j{num_cpus}", cwd=build_dir)
- else:
- sh.run(f"cmake --build . -- " + " ".join(ninja_args), cwd=build_dir)
+ sh.run("cmake --build . -- " + " ".join(ninja_args), cwd=build_dir)
if use_sccache:
logging.info("===== sccache stats =====")
diff --git a/tests/scripts/task_python_integration_i386only.sh
b/tests/scripts/task_python_integration_i386only.sh
deleted file mode 100755
index f4fe311671..0000000000
--- a/tests/scripts/task_python_integration_i386only.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-set -euxo pipefail
-
-
-export TVM_INTEGRATION_I386_ONLY=1
-
-./tests/scripts/task_python_integration.sh