This is an automated email from the ASF dual-hosted git repository.

areusch pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new c0f4bf72b6 [ci] Redirect sphinx-gallery URLs to S3 (#11839)
c0f4bf72b6 is described below

commit c0f4bf72b6ee30648ef78ce865afc733c95fe98c
Author: driazati <[email protected]>
AuthorDate: Thu Jun 30 09:10:12 2022 -0700

    [ci] Redirect sphinx-gallery URLs to S3 (#11839)
    
    Co-authored-by: driazati <[email protected]>
---
 gallery/how_to/compile_models/from_coreml.py       |  6 ++
 gallery/how_to/compile_models/from_darknet.py      |  6 ++
 gallery/how_to/compile_models/from_keras.py        |  6 ++
 gallery/how_to/compile_models/from_mxnet.py        |  6 ++
 gallery/how_to/compile_models/from_oneflow.py      |  6 ++
 gallery/how_to/compile_models/from_onnx.py         |  6 ++
 gallery/how_to/compile_models/from_paddle.py       |  6 ++
 gallery/how_to/compile_models/from_pytorch.py      |  6 ++
 gallery/how_to/compile_models/from_tensorflow.py   |  6 ++
 gallery/how_to/compile_models/from_tflite.py       |  6 ++
 .../deploy_models/deploy_model_on_android.py       |  6 ++
 .../how_to/deploy_models/deploy_model_on_rasp.py   |  6 ++
 .../deploy_object_detection_pytorch.py             |  6 ++
 .../how_to/deploy_models/deploy_prequantized.py    |  6 ++
 .../deploy_models/deploy_prequantized_tflite.py    |  6 ++
 gallery/how_to/deploy_models/deploy_quantized.py   |  6 ++
 gallery/how_to/deploy_models/deploy_sparse.py      |  6 ++
 gallery/how_to/deploy_models/deploy_ssd_gluoncv.py |  6 ++
 .../how_to/extend_tvm/bring_your_own_datatypes.py  |  6 ++
 gallery/how_to/extend_tvm/low_level_custom_pass.py |  6 ++
 gallery/how_to/extend_tvm/use_pass_infra.py        |  6 ++
 gallery/how_to/extend_tvm/use_pass_instrument.py   |  6 ++
 gallery/how_to/optimize_operators/opt_conv_cuda.py |  6 ++
 .../optimize_operators/opt_conv_tensorcore.py      |  6 ++
 gallery/how_to/optimize_operators/opt_gemm.py      |  6 ++
 .../tune_conv2d_layer_cuda.py                      |  6 ++
 .../tune_with_autoscheduler/tune_network_arm.py    |  6 ++
 .../tune_with_autoscheduler/tune_network_cuda.py   |  6 ++
 .../tune_with_autoscheduler/tune_network_mali.py   |  6 ++
 .../tune_with_autoscheduler/tune_network_x86.py    |  6 ++
 .../tune_with_autoscheduler/tune_sparse_x86.py     |  6 ++
 .../how_to/tune_with_autotvm/tune_conv2d_cuda.py   |  6 ++
 gallery/how_to/tune_with_autotvm/tune_relay_arm.py |  6 ++
 .../how_to/tune_with_autotvm/tune_relay_cuda.py    |  6 ++
 .../tune_with_autotvm/tune_relay_mobile_gpu.py     |  6 ++
 gallery/how_to/tune_with_autotvm/tune_relay_x86.py |  6 ++
 .../how_to/work_with_microtvm/micro_autotune.py    |  6 ++
 gallery/how_to/work_with_microtvm/micro_ethosu.py  |  6 ++
 .../work_with_microtvm/micro_reference_vm.py       |  6 ++
 gallery/how_to/work_with_microtvm/micro_tflite.py  |  6 ++
 gallery/how_to/work_with_relay/build_gcn.py        |  6 ++
 .../how_to/work_with_relay/using_external_lib.py   |  6 ++
 gallery/how_to/work_with_relay/using_relay_viz.py  |  6 ++
 gallery/how_to/work_with_schedules/extern_op.py    |  6 ++
 gallery/how_to/work_with_schedules/intrin_math.py  |  8 +-
 gallery/how_to/work_with_schedules/reduction.py    |  6 ++
 gallery/how_to/work_with_schedules/scan.py         |  6 ++
 .../work_with_schedules/schedule_primitives.py     |  6 ++
 gallery/how_to/work_with_schedules/tedd.py         |  6 ++
 gallery/how_to/work_with_schedules/tensorize.py    |  6 ++
 gallery/how_to/work_with_schedules/tuple_inputs.py |  6 ++
 gallery/tutorial/auto_scheduler_matmul_x86.py      |  6 ++
 gallery/tutorial/autotvm_matmul_x86.py             |  6 ++
 gallery/tutorial/autotvm_relay_x86.py              |  6 ++
 gallery/tutorial/cross_compilation_and_rpc.py      |  6 ++
 gallery/tutorial/install.py                        |  6 ++
 gallery/tutorial/intro_topi.py                     |  6 ++
 gallery/tutorial/introduction.py                   |  6 ++
 gallery/tutorial/relay_quick_start.py              |  6 ++
 gallery/tutorial/tensor_expr_get_started.py        |  6 ++
 gallery/tutorial/tensor_ir_blitz_course.py         |  6 ++
 gallery/tutorial/tvmc_command_line_driver.py       |  6 ++
 gallery/tutorial/tvmc_python.py                    |  6 ++
 python/tvm/testing/utils.py                        | 47 +++++++++++
 tests/lint/check_request_hook.py                   | 92 ++++++++++++++++++++++
 tests/scripts/request_hook/request_hook.py         | 61 ++++++++++++++
 tests/scripts/task_lint.sh                         |  3 +
 67 files changed, 582 insertions(+), 1 deletion(-)

diff --git a/gallery/how_to/compile_models/from_coreml.py 
b/gallery/how_to/compile_models/from_coreml.py
index 98d1969f36..96d2967947 100644
--- a/gallery/how_to/compile_models/from_coreml.py
+++ b/gallery/how_to/compile_models/from_coreml.py
@@ -34,6 +34,12 @@ A quick solution is to install via pip
 or please refer to official site
 https://github.com/apple/coremltools
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 from tvm import te
 import tvm.relay as relay
diff --git a/gallery/how_to/compile_models/from_darknet.py 
b/gallery/how_to/compile_models/from_darknet.py
index 232058641a..c12a9e7e15 100644
--- a/gallery/how_to/compile_models/from_darknet.py
+++ b/gallery/how_to/compile_models/from_darknet.py
@@ -31,6 +31,12 @@ Please install CFFI and CV2 before executing this script
   pip install opencv-python
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 # numpy and matplotlib
 import numpy as np
 import matplotlib.pyplot as plt
diff --git a/gallery/how_to/compile_models/from_keras.py 
b/gallery/how_to/compile_models/from_keras.py
index 1db27799fe..895a601ada 100644
--- a/gallery/how_to/compile_models/from_keras.py
+++ b/gallery/how_to/compile_models/from_keras.py
@@ -34,6 +34,12 @@ A quick solution is to install via pip
 or please refer to official site
 https://keras.io/#installation
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 from tvm import te
 import tvm.relay as relay
diff --git a/gallery/how_to/compile_models/from_mxnet.py 
b/gallery/how_to/compile_models/from_mxnet.py
index 027e9e6eb7..3808461862 100644
--- a/gallery/how_to/compile_models/from_mxnet.py
+++ b/gallery/how_to/compile_models/from_mxnet.py
@@ -35,6 +35,12 @@ A quick solution is
 or please refer to official installation guide.
 https://mxnet.apache.org/versions/master/install/index.html
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 # some standard imports
 import mxnet as mx
 import tvm
diff --git a/gallery/how_to/compile_models/from_oneflow.py 
b/gallery/how_to/compile_models/from_oneflow.py
index f92f0b0f1e..eb27c4b3e3 100644
--- a/gallery/how_to/compile_models/from_oneflow.py
+++ b/gallery/how_to/compile_models/from_oneflow.py
@@ -35,6 +35,12 @@ https://github.com/Oneflow-Inc/oneflow
 
 Currently, TVM supports OneFlow 0.7.0. Other versions may be unstable.
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import os, math
 from matplotlib import pyplot as plt
 import numpy as np
diff --git a/gallery/how_to/compile_models/from_onnx.py 
b/gallery/how_to/compile_models/from_onnx.py
index 586c811aa6..f0256bc7d3 100644
--- a/gallery/how_to/compile_models/from_onnx.py
+++ b/gallery/how_to/compile_models/from_onnx.py
@@ -32,6 +32,12 @@ A quick solution is to install protobuf compiler, and
 or please refer to official site.
 https://github.com/onnx/onnx
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import onnx
 import numpy as np
 import tvm
diff --git a/gallery/how_to/compile_models/from_paddle.py 
b/gallery/how_to/compile_models/from_paddle.py
index 9d67cbcdf9..fecb1c48da 100644
--- a/gallery/how_to/compile_models/from_paddle.py
+++ b/gallery/how_to/compile_models/from_paddle.py
@@ -30,6 +30,12 @@ A quick solution is
 or please refer to official site.
 
https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tarfile
 import paddle
 import numpy as np
diff --git a/gallery/how_to/compile_models/from_pytorch.py 
b/gallery/how_to/compile_models/from_pytorch.py
index e8d0b4998f..98b531fa6d 100644
--- a/gallery/how_to/compile_models/from_pytorch.py
+++ b/gallery/how_to/compile_models/from_pytorch.py
@@ -41,6 +41,12 @@ Currently, TVM supports PyTorch 1.7 and 1.4. Other versions 
may
 be unstable.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import tvm
 from tvm import relay
 
diff --git a/gallery/how_to/compile_models/from_tensorflow.py 
b/gallery/how_to/compile_models/from_tensorflow.py
index 4563e245c0..9a32397815 100644
--- a/gallery/how_to/compile_models/from_tensorflow.py
+++ b/gallery/how_to/compile_models/from_tensorflow.py
@@ -24,6 +24,12 @@ For us to begin with, tensorflow python module is required 
to be installed.
 Please refer to https://www.tensorflow.org/install
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 # tvm, relay
 import tvm
 from tvm import te
diff --git a/gallery/how_to/compile_models/from_tflite.py 
b/gallery/how_to/compile_models/from_tflite.py
index b720402366..712269381f 100644
--- a/gallery/how_to/compile_models/from_tflite.py
+++ b/gallery/how_to/compile_models/from_tflite.py
@@ -52,6 +52,12 @@ Now please check if TFLite package is installed 
successfully, ``python -c "impor
 
 Below you can find an example on how to compile TFLite model using TVM.
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 ######################################################################
 # Utils for downloading and extracting zip files
 # ----------------------------------------------
diff --git a/gallery/how_to/deploy_models/deploy_model_on_android.py 
b/gallery/how_to/deploy_models/deploy_model_on_android.py
index c7b610d5d5..10e108239e 100644
--- a/gallery/how_to/deploy_models/deploy_model_on_android.py
+++ b/gallery/how_to/deploy_models/deploy_model_on_android.py
@@ -25,6 +25,12 @@ Deploy the Pretrained Model on Android
 This is an example of using Relay to compile a keras model and deploy it on 
Android device.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import os
 import numpy as np
 from PIL import Image
diff --git a/gallery/how_to/deploy_models/deploy_model_on_rasp.py 
b/gallery/how_to/deploy_models/deploy_model_on_rasp.py
index de4ed9aff0..ab5374d93d 100644
--- a/gallery/how_to/deploy_models/deploy_model_on_rasp.py
+++ b/gallery/how_to/deploy_models/deploy_model_on_rasp.py
@@ -26,6 +26,12 @@ This is an example of using Relay to compile a ResNet model 
and deploy
 it on Raspberry Pi.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import tvm
 from tvm import te
 import tvm.relay as relay
diff --git a/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py 
b/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py
index b5b0e4acf1..0d8d0f2867 100644
--- a/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py
+++ b/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py
@@ -40,6 +40,12 @@ Currently, TVM supports PyTorch 1.7 and 1.4. Other versions 
may
 be unstable.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import tvm
 from tvm import relay
 from tvm import relay
diff --git a/gallery/how_to/deploy_models/deploy_prequantized.py 
b/gallery/how_to/deploy_models/deploy_prequantized.py
index caee2b3b41..fdb4de289d 100644
--- a/gallery/how_to/deploy_models/deploy_prequantized.py
+++ b/gallery/how_to/deploy_models/deploy_prequantized.py
@@ -28,6 +28,12 @@ Here, we demonstrate how to load and run models quantized by 
PyTorch, MXNet, and
 Once loaded, we can run compiled, quantized models on any hardware TVM 
supports.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 
#################################################################################
 # First, necessary imports
 from PIL import Image
diff --git a/gallery/how_to/deploy_models/deploy_prequantized_tflite.py 
b/gallery/how_to/deploy_models/deploy_prequantized_tflite.py
index 830e2ab074..494b4a9e21 100644
--- a/gallery/how_to/deploy_models/deploy_prequantized_tflite.py
+++ b/gallery/how_to/deploy_models/deploy_prequantized_tflite.py
@@ -42,6 +42,12 @@ Now please check if TFLite package is installed 
successfully, ``python -c "impor
 
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ###############################################################################
 # Necessary imports
 # -----------------
diff --git a/gallery/how_to/deploy_models/deploy_quantized.py 
b/gallery/how_to/deploy_models/deploy_quantized.py
index 2d9275796e..24c7ce3331 100644
--- a/gallery/how_to/deploy_models/deploy_quantized.py
+++ b/gallery/how_to/deploy_models/deploy_quantized.py
@@ -27,6 +27,12 @@ In this tutorial, we will import a GluonCV pre-trained model 
on ImageNet to
 Relay, quantize the Relay model and then perform the inference.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import tvm
 from tvm import te
 from tvm import relay
diff --git a/gallery/how_to/deploy_models/deploy_sparse.py 
b/gallery/how_to/deploy_models/deploy_sparse.py
index 56a5f1aafd..b9a26e0d30 100644
--- a/gallery/how_to/deploy_models/deploy_sparse.py
+++ b/gallery/how_to/deploy_models/deploy_sparse.py
@@ -70,6 +70,12 @@ sparsity. A fun exercise is comparing the real speed of 
PruneBert with the block
 sparse speed using fake weights to see the benefit of structured sparsity.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ###############################################################################
 # Load Required Modules
 # ---------------------
diff --git a/gallery/how_to/deploy_models/deploy_ssd_gluoncv.py 
b/gallery/how_to/deploy_models/deploy_ssd_gluoncv.py
index ebe18670c6..f39244a2eb 100644
--- a/gallery/how_to/deploy_models/deploy_ssd_gluoncv.py
+++ b/gallery/how_to/deploy_models/deploy_ssd_gluoncv.py
@@ -23,6 +23,12 @@ Deploy Single Shot Multibox Detector(SSD) model
 This article is an introductory tutorial to deploy SSD models with TVM.
 We will use GluonCV pre-trained SSD model and convert it to Relay IR
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 from tvm import te
 
diff --git a/gallery/how_to/extend_tvm/bring_your_own_datatypes.py 
b/gallery/how_to/extend_tvm/bring_your_own_datatypes.py
index 1a48781e24..479269a224 100644
--- a/gallery/how_to/extend_tvm/bring_your_own_datatypes.py
+++ b/gallery/how_to/extend_tvm/bring_your_own_datatypes.py
@@ -52,6 +52,12 @@ If you would like to try this with your own datatype 
library, first bring the li
     ctypes.CDLL('my-datatype-lib.so', ctypes.RTLD_GLOBAL)
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ######################
 # A Simple TVM Program
 # --------------------
diff --git a/gallery/how_to/extend_tvm/low_level_custom_pass.py 
b/gallery/how_to/extend_tvm/low_level_custom_pass.py
index ee96d8220c..0f99c72cee 100644
--- a/gallery/how_to/extend_tvm/low_level_custom_pass.py
+++ b/gallery/how_to/extend_tvm/low_level_custom_pass.py
@@ -40,6 +40,12 @@ Before reading this tutorial, we assume readers have already 
known these topics
   take a look at ``python/tvm/build_module.py`` to get some basics.
 
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 from tvm import te
 import numpy as np
diff --git a/gallery/how_to/extend_tvm/use_pass_infra.py 
b/gallery/how_to/extend_tvm/use_pass_infra.py
index e38383e690..a41a26fc0b 100644
--- a/gallery/how_to/extend_tvm/use_pass_infra.py
+++ b/gallery/how_to/extend_tvm/use_pass_infra.py
@@ -40,6 +40,12 @@ a certain optimization and create an optimization pipeline 
for a Relay program.
 The same approach can be used for tir as well.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import numpy as np
 import tvm
 from tvm import te
diff --git a/gallery/how_to/extend_tvm/use_pass_instrument.py 
b/gallery/how_to/extend_tvm/use_pass_instrument.py
index 036aa63e37..3079e2f0e7 100644
--- a/gallery/how_to/extend_tvm/use_pass_instrument.py
+++ b/gallery/how_to/extend_tvm/use_pass_instrument.py
@@ -33,6 +33,12 @@ but an extension mechanism is available via the 
:py:func:`tvm.instrument.pass_in
 This tutorial demonstrates how developers can use ``PassContext`` to instrument
 passes. Please also refer to the :ref:`pass-infra`.
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 import tvm.relay as relay
 from tvm.relay.testing import resnet
diff --git a/gallery/how_to/optimize_operators/opt_conv_cuda.py 
b/gallery/how_to/optimize_operators/opt_conv_cuda.py
index 3d2caa0d31..e5b452af66 100644
--- a/gallery/how_to/optimize_operators/opt_conv_cuda.py
+++ b/gallery/how_to/optimize_operators/opt_conv_cuda.py
@@ -30,6 +30,12 @@ channel, batch.
 
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ################################################################
 # Preparation and Algorithm
 # -------------------------
diff --git a/gallery/how_to/optimize_operators/opt_conv_tensorcore.py 
b/gallery/how_to/optimize_operators/opt_conv_tensorcore.py
index ccfc7b9743..4cc2b40b7b 100644
--- a/gallery/how_to/optimize_operators/opt_conv_tensorcore.py
+++ b/gallery/how_to/optimize_operators/opt_conv_tensorcore.py
@@ -27,6 +27,12 @@ convolution has a large batch. We strongly recommend 
covering the :ref:`opt-conv
 
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ################################################################
 # TensorCore Introduction
 # -----------------------
diff --git a/gallery/how_to/optimize_operators/opt_gemm.py 
b/gallery/how_to/optimize_operators/opt_gemm.py
index 920d7a87fa..d2ec711c2b 100644
--- a/gallery/how_to/optimize_operators/opt_gemm.py
+++ b/gallery/how_to/optimize_operators/opt_gemm.py
@@ -48,6 +48,12 @@ All the experiment results mentioned below, are executed on 
2015's 15' MacBook e
 Intel i7-4770HQ CPU. The cache line size should be 64 bytes for all the x86 
CPUs.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 
################################################################################################
 # Preparation and Baseline
 # ------------------------
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py 
b/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py
index a4f7e22d89..5d173e3812 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py
@@ -37,6 +37,12 @@ get it to run, you will need to wrap the body of this 
tutorial in a :code:`if
 __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import os
 
 import numpy as np
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py 
b/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py
index 9c5820c991..09a1d0cea5 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py
@@ -46,6 +46,12 @@ get it to run, you will need to wrap the body of this 
tutorial in a :code:`if
 __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import numpy as np
 import os
 
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py 
b/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py
index b403c0aa84..cc29f27ba2 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py
@@ -44,6 +44,12 @@ get it to run, you will need to wrap the body of this 
tutorial in a :code:`if
 __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import numpy as np
 
 import tvm
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py 
b/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py
index 2d1e515209..8ac0b235d7 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py
@@ -44,6 +44,12 @@ get it to run, you will need to wrap the body of this 
tutorial in a :code:`if
 __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import numpy as np
 
 import tvm
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py 
b/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py
index 6cb8d6f14c..5a321104c8 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py
@@ -45,6 +45,12 @@ get it to run, you will need to wrap the body of this 
tutorial in a :code:`if
 __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import numpy as np
 
 import tvm
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py 
b/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py
index 55ee76ef6c..0a2ddbd1bd 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py
@@ -35,6 +35,12 @@ get it to run, you will need to wrap the body of this 
tutorial in a :code:`if
 __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import os
 
 import numpy as np
diff --git a/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py 
b/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py
index e3072773bf..95d6dcb0a1 100644
--- a/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py
+++ b/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py
@@ -28,6 +28,12 @@ get it to run, you will need to wrap the body of this 
tutorial in a :code:`if
 __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ######################################################################
 # Install dependencies
 # --------------------
diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_arm.py 
b/gallery/how_to/tune_with_autotvm/tune_relay_arm.py
index f072c5ddac..ab278021d2 100644
--- a/gallery/how_to/tune_with_autotvm/tune_relay_arm.py
+++ b/gallery/how_to/tune_with_autotvm/tune_relay_arm.py
@@ -41,6 +41,12 @@ get it to run, you will need to wrap the body of this 
tutorial in a :code:`if
 __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ######################################################################
 # Install dependencies
 # --------------------
diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py 
b/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py
index b2af2e13f4..459b2798c2 100644
--- a/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py
+++ b/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py
@@ -39,6 +39,12 @@ get it to run, you will need to wrap the body of this 
tutorial in a :code:`if
 __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ######################################################################
 # Install dependencies
 # --------------------
diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py 
b/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
index d3f4ec62fa..5a4f0c56d2 100644
--- a/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
+++ b/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
@@ -39,6 +39,12 @@ get it to run, you will need to wrap the body of this 
tutorial in a :code:`if
 __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ######################################################################
 # Install dependencies
 # --------------------
diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_x86.py 
b/gallery/how_to/tune_with_autotvm/tune_relay_x86.py
index 771220bb33..6e46fbd8ff 100644
--- a/gallery/how_to/tune_with_autotvm/tune_relay_x86.py
+++ b/gallery/how_to/tune_with_autotvm/tune_relay_x86.py
@@ -28,6 +28,12 @@ Note that this tutorial will not run on Windows or recent 
versions of macOS. To
 get it to run, you will need to wrap the body of this tutorial in a :code:`if
 __name__ == "__main__":` block.
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import os
 import numpy as np
 
diff --git a/gallery/how_to/work_with_microtvm/micro_autotune.py 
b/gallery/how_to/work_with_microtvm/micro_autotune.py
index 613d92e141..58c52508b7 100644
--- a/gallery/how_to/work_with_microtvm/micro_autotune.py
+++ b/gallery/how_to/work_with_microtvm/micro_autotune.py
@@ -27,6 +27,12 @@ Autotuning with microTVM
 This tutorial explains how to autotune a model using the C runtime.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import os
 import json
 import numpy as np
diff --git a/gallery/how_to/work_with_microtvm/micro_ethosu.py 
b/gallery/how_to/work_with_microtvm/micro_ethosu.py
index f55fad71dd..8e37a0ea5e 100644
--- a/gallery/how_to/work_with_microtvm/micro_ethosu.py
+++ b/gallery/how_to/work_with_microtvm/micro_ethosu.py
@@ -37,6 +37,12 @@ In this tutorial, we will be compiling a MobileNet v1 model 
and instructing
 TVM to offload operators to the Ethos(TM)-U55 where possible.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 
################################################################################
 # Obtaining TVM
 # -------------
diff --git a/gallery/how_to/work_with_microtvm/micro_reference_vm.py 
b/gallery/how_to/work_with_microtvm/micro_reference_vm.py
index 9eacd9a963..b87a726564 100644
--- a/gallery/how_to/work_with_microtvm/micro_reference_vm.py
+++ b/gallery/how_to/work_with_microtvm/micro_reference_vm.py
@@ -157,3 +157,9 @@ local QEMU emulator running within the VM, run the 
following commands instead:
 
 
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
diff --git a/gallery/how_to/work_with_microtvm/micro_tflite.py 
b/gallery/how_to/work_with_microtvm/micro_tflite.py
index 3d871ba783..dfe33eedac 100644
--- a/gallery/how_to/work_with_microtvm/micro_tflite.py
+++ b/gallery/how_to/work_with_microtvm/micro_tflite.py
@@ -25,6 +25,12 @@ This tutorial is an introduction to working with microTVM 
and a TFLite
 model with Relay.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ######################################################################
 # .. note::
 #     If you want to run this tutorial on the microTVM Reference VM, download 
the Jupyter
diff --git a/gallery/how_to/work_with_relay/build_gcn.py 
b/gallery/how_to/work_with_relay/build_gcn.py
index fcffbd77ff..8953ffc2e4 100644
--- a/gallery/how_to/work_with_relay/build_gcn.py
+++ b/gallery/how_to/work_with_relay/build_gcn.py
@@ -118,6 +118,12 @@ infeat_dim: int
 num_classes: int
     dimension of model output (Number of classes)
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 dataset = "cora"
 g, data = load_dataset(dataset)
 
diff --git a/gallery/how_to/work_with_relay/using_external_lib.py 
b/gallery/how_to/work_with_relay/using_external_lib.py
index 8b6957d1db..c018ee13c7 100644
--- a/gallery/how_to/work_with_relay/using_external_lib.py
+++ b/gallery/how_to/work_with_relay/using_external_lib.py
@@ -31,6 +31,12 @@ For example, to use cuDNN, USE_CUDNN option in 
`cmake/config.cmake` needs to be
 
 To begin with, we import Relay and TVM.
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 from tvm import te
 import numpy as np
diff --git a/gallery/how_to/work_with_relay/using_relay_viz.py 
b/gallery/how_to/work_with_relay/using_relay_viz.py
index b0132f40b9..2e68ce9028 100644
--- a/gallery/how_to/work_with_relay/using_relay_viz.py
+++ b/gallery/how_to/work_with_relay/using_relay_viz.py
@@ -35,6 +35,12 @@ We will introduce how to implement customized parsers and 
renderers through inte
 
 For more details, please refer to :py:mod:`tvm.contrib.relay_viz`.
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 from typing import (
     Dict,
     Union,
diff --git a/gallery/how_to/work_with_schedules/extern_op.py 
b/gallery/how_to/work_with_schedules/extern_op.py
index a0aa5d7245..ad741a08d5 100644
--- a/gallery/how_to/work_with_schedules/extern_op.py
+++ b/gallery/how_to/work_with_schedules/extern_op.py
@@ -31,6 +31,12 @@ or pointer to DLTensor as argument.
 """
 from __future__ import absolute_import, print_function
 
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 from tvm import te
 import numpy as np
diff --git a/gallery/how_to/work_with_schedules/intrin_math.py 
b/gallery/how_to/work_with_schedules/intrin_math.py
index 535563bfb5..5a8732abd7 100644
--- a/gallery/how_to/work_with_schedules/intrin_math.py
+++ b/gallery/how_to/work_with_schedules/intrin_math.py
@@ -29,7 +29,13 @@ how we can invoke these target specific functions, and how 
we can unify
 the interface via TVM's intrinsic API.
 """
 from __future__ import absolute_import, print_function
-import numpy as np
+
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignoreimport numpy as np
 
 import tvm
 from tvm import te
diff --git a/gallery/how_to/work_with_schedules/reduction.py 
b/gallery/how_to/work_with_schedules/reduction.py
index 164f36dafc..432e9cd143 100644
--- a/gallery/how_to/work_with_schedules/reduction.py
+++ b/gallery/how_to/work_with_schedules/reduction.py
@@ -27,6 +27,12 @@ In this tutorial, we will demonstrate how to do reduction in 
TVM.
 """
 from __future__ import absolute_import, print_function
 
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 import tvm.testing
 from tvm import te
diff --git a/gallery/how_to/work_with_schedules/scan.py 
b/gallery/how_to/work_with_schedules/scan.py
index 3f3d7e91ee..d21673acd9 100644
--- a/gallery/how_to/work_with_schedules/scan.py
+++ b/gallery/how_to/work_with_schedules/scan.py
@@ -24,6 +24,12 @@ Recurrent computing is a typical pattern in neural networks.
 """
 from __future__ import absolute_import, print_function
 
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 import tvm.testing
 from tvm import te
diff --git a/gallery/how_to/work_with_schedules/schedule_primitives.py 
b/gallery/how_to/work_with_schedules/schedule_primitives.py
index 65fdeda57c..af67ed1527 100644
--- a/gallery/how_to/work_with_schedules/schedule_primitives.py
+++ b/gallery/how_to/work_with_schedules/schedule_primitives.py
@@ -28,6 +28,12 @@ various primitives provided by TVM.
 """
 from __future__ import absolute_import, print_function
 
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 from tvm import te
 import numpy as np
diff --git a/gallery/how_to/work_with_schedules/tedd.py 
b/gallery/how_to/work_with_schedules/tedd.py
index 34ad43c220..7cb24f4335 100644
--- a/gallery/how_to/work_with_schedules/tedd.py
+++ b/gallery/how_to/work_with_schedules/tedd.py
@@ -37,6 +37,12 @@ TEDD renders these three graphs from a given schedule.  This 
tutorial demonstrat
 how to use TEDD and how to interpret the rendered graphs.
 
 """
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 from tvm import te
 from tvm import topi
diff --git a/gallery/how_to/work_with_schedules/tensorize.py 
b/gallery/how_to/work_with_schedules/tensorize.py
index 40e68074ad..45eaf349f3 100644
--- a/gallery/how_to/work_with_schedules/tensorize.py
+++ b/gallery/how_to/work_with_schedules/tensorize.py
@@ -34,6 +34,12 @@ and usage of tensorize instead of providing an efficient 
solution.
 """
 from __future__ import absolute_import, print_function
 
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 from tvm import te
 import tvm.testing
diff --git a/gallery/how_to/work_with_schedules/tuple_inputs.py 
b/gallery/how_to/work_with_schedules/tuple_inputs.py
index 73db7b90a7..86ec8b2d19 100644
--- a/gallery/how_to/work_with_schedules/tuple_inputs.py
+++ b/gallery/how_to/work_with_schedules/tuple_inputs.py
@@ -27,6 +27,12 @@ In this tutorial, we will introduce the usage of tuple 
inputs in TVM.
 """
 from __future__ import absolute_import, print_function
 
+
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
 import tvm
 from tvm import te
 import numpy as np
diff --git a/gallery/tutorial/auto_scheduler_matmul_x86.py 
b/gallery/tutorial/auto_scheduler_matmul_x86.py
index b9f89f6723..279987f00d 100644
--- a/gallery/tutorial/auto_scheduler_matmul_x86.py
+++ b/gallery/tutorial/auto_scheduler_matmul_x86.py
@@ -38,6 +38,12 @@ We use matrix multiplication as an example in this tutorial.
   __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import os
 
 import numpy as np
diff --git a/gallery/tutorial/autotvm_matmul_x86.py 
b/gallery/tutorial/autotvm_matmul_x86.py
index b84a6193cd..ebdbacb221 100644
--- a/gallery/tutorial/autotvm_matmul_x86.py
+++ b/gallery/tutorial/autotvm_matmul_x86.py
@@ -45,6 +45,12 @@ workflow is illustrated by a matrix multiplication example.
   :code:`if __name__ == "__main__":` block.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 
################################################################################
 # Install dependencies
 # --------------------
diff --git a/gallery/tutorial/autotvm_relay_x86.py 
b/gallery/tutorial/autotvm_relay_x86.py
index 4e5714a6db..b7dfbe28f4 100644
--- a/gallery/tutorial/autotvm_relay_x86.py
+++ b/gallery/tutorial/autotvm_relay_x86.py
@@ -42,6 +42,12 @@ The goal of this section is to give you an overview of TVM's 
capabilites and
 how to use them through the Python API.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 
################################################################################
 # TVM is a deep learning compiler framework, with a number of different modules
 # available for working with deep learning models and operators. In this
diff --git a/gallery/tutorial/cross_compilation_and_rpc.py 
b/gallery/tutorial/cross_compilation_and_rpc.py
index 25208369f7..3f74899f7b 100644
--- a/gallery/tutorial/cross_compilation_and_rpc.py
+++ b/gallery/tutorial/cross_compilation_and_rpc.py
@@ -31,6 +31,12 @@ platforms. In this tutorial, we will use the Raspberry Pi 
for a CPU example
 and the Firefly-RK3399 for an OpenCL example.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ######################################################################
 # Build TVM Runtime on Device
 # ---------------------------
diff --git a/gallery/tutorial/install.py b/gallery/tutorial/install.py
index 0eb3ccc94c..a499b03794 100644
--- a/gallery/tutorial/install.py
+++ b/gallery/tutorial/install.py
@@ -28,6 +28,12 @@ methods for installing TVM. These include:
 * Installing from third-party binary package.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 
################################################################################
 # Installing From Source
 # ----------------------
diff --git a/gallery/tutorial/intro_topi.py b/gallery/tutorial/intro_topi.py
index 17fa3ff370..e10a74c849 100644
--- a/gallery/tutorial/intro_topi.py
+++ b/gallery/tutorial/intro_topi.py
@@ -26,6 +26,12 @@ TOPI provides numpy-style generic operations and schedules 
with higher abstracti
 In this tutorial, we will see how TOPI can save us from writing boilerplate 
code in TVM.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import tvm
 import tvm.testing
 from tvm import te
diff --git a/gallery/tutorial/introduction.py b/gallery/tutorial/introduction.py
index 5fe4b4e5f7..908a8e52c7 100644
--- a/gallery/tutorial/introduction.py
+++ b/gallery/tutorial/introduction.py
@@ -45,6 +45,12 @@ Contents
 #. :doc:`Compiling Deep Learning Models for GPUs <relay_quick_start>`
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 
################################################################################
 # An Overview of TVM and Model Optimization
 # =========================================
diff --git a/gallery/tutorial/relay_quick_start.py 
b/gallery/tutorial/relay_quick_start.py
index fd7f5aa9d7..8910817c21 100644
--- a/gallery/tutorial/relay_quick_start.py
+++ b/gallery/tutorial/relay_quick_start.py
@@ -26,6 +26,12 @@ generates a runtime library for Nvidia GPU with TVM.
 Notice that you need to build TVM with cuda and llvm enabled.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 ######################################################################
 # Overview for Supported Hardware Backend of TVM
 # ----------------------------------------------
diff --git a/gallery/tutorial/tensor_expr_get_started.py 
b/gallery/tutorial/tensor_expr_get_started.py
index 25ea4e8a55..11186d2f14 100644
--- a/gallery/tutorial/tensor_expr_get_started.py
+++ b/gallery/tutorial/tensor_expr_get_started.py
@@ -39,6 +39,12 @@ serve as the comparative basis for future tutorials covering 
more advanced
 features of TVM.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 
################################################################################
 # Example 1: Writing and Scheduling Vector Addition in TE for CPU
 # ---------------------------------------------------------------
diff --git a/gallery/tutorial/tensor_ir_blitz_course.py 
b/gallery/tutorial/tensor_ir_blitz_course.py
index 11edc7ae9f..a62fa39793 100644
--- a/gallery/tutorial/tensor_ir_blitz_course.py
+++ b/gallery/tutorial/tensor_ir_blitz_course.py
@@ -29,6 +29,12 @@ TensorIR is a domain specific language for deep learning 
programs serving two br
 
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 import tvm
 from tvm.ir.module import IRModule
 from tvm.script import tir as T
diff --git a/gallery/tutorial/tvmc_command_line_driver.py 
b/gallery/tutorial/tvmc_command_line_driver.py
index 48e3703beb..ad5b37190c 100644
--- a/gallery/tutorial/tvmc_command_line_driver.py
+++ b/gallery/tutorial/tvmc_command_line_driver.py
@@ -41,6 +41,12 @@ The goal of this section is to give you an overview of TVM 
and TVMC's
 capabilities, and set the stage for understanding how TVM works.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 
################################################################################
 # Using TVMC
 # ----------
diff --git a/gallery/tutorial/tvmc_python.py b/gallery/tutorial/tvmc_python.py
index 6efc565f0a..28b0a97450 100644
--- a/gallery/tutorial/tvmc_python.py
+++ b/gallery/tutorial/tvmc_python.py
@@ -36,6 +36,12 @@ Follow the steps to download a resnet model via the terminal:
 Let's start editing the python file in your favorite text editor.
 """
 
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+
 
################################################################################
 # Step 0: Imports
 # ~~~~~~~~~~~~~~~
diff --git a/python/tvm/testing/utils.py b/python/tvm/testing/utils.py
index 5a6ded9bcb..96275e2af6 100644
--- a/python/tvm/testing/utils.py
+++ b/python/tvm/testing/utils.py
@@ -76,6 +76,7 @@ import shutil
 import sys
 import time
 
+from pathlib import Path
 from typing import Optional, Callable, Union, List
 
 import pytest
@@ -93,6 +94,7 @@ from tvm.error import TVMError
 
 
 SKIP_SLOW_TESTS = os.getenv("SKIP_SLOW_TESTS", "").lower() in {"true", "1", 
"yes"}
+IS_IN_CI = os.getenv("CI", "") == "true"
 
 skip_if_wheel_test = pytest.mark.skipif(
     os.getenv("WHEEL_TEST") is not None, reason="Test not supported in wheel."
@@ -1613,6 +1615,51 @@ def is_ampere_or_newer():
     return major >= 8
 
 
+def install_request_hook(depth: int) -> None:
+    """Add a wrapper around urllib.request for CI tests"""
+    if not IS_IN_CI:
+        return
+
+    # 
https://sphinx-gallery.github.io/stable/faq.html#why-is-file-not-defined-what-can-i-use
+    base = None
+    msg = ""
+    try:
+        base = __file__
+        msg += f"found file {__file__}\n"
+    except NameError:
+        msg += "no file\n"
+
+    if base is None:
+        hook_script_dir = Path.cwd().resolve()
+        msg += "used path.cwd()\n"
+    else:
+        hook_script_dir = Path(base).resolve().parent
+        msg += "used base()\n"
+
+    msg += f"using depth {depth}\n"
+    if depth <= 0:
+        raise ValueError(f"depth less than 1 not supported, found: {depth}")
+
+    # Go up the parent directories
+    while depth > 0:
+        msg += f"[depth={depth}] dir={hook_script_dir}\n"
+        hook_script_dir = hook_script_dir.parent
+        depth -= 1
+
+    # Ensure the specified dir is valid
+    hook_script_dir = hook_script_dir / "tests" / "scripts" / "request_hook"
+    if not hook_script_dir.exists():
+        raise RuntimeError(f"Directory {hook_script_dir} does not 
exist:\n{msg}")
+
+    # Import the hook and start it up (it's not included here directly to avoid
+    # keeping a database of URLs inside the tvm Python package)
+    sys.path.append(str(hook_script_dir))
+    # This import is intentionally delayed since it should only happen in CI
+    import request_hook  # pylint: disable=import-outside-toplevel
+
+    request_hook.init()
+
+
 def main():
     test_file = inspect.getsourcefile(sys._getframe(1))
     sys.exit(pytest.main([test_file] + sys.argv[1:]))
diff --git a/tests/lint/check_request_hook.py b/tests/lint/check_request_hook.py
new file mode 100644
index 0000000000..6e5c523d11
--- /dev/null
+++ b/tests/lint/check_request_hook.py
@@ -0,0 +1,92 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import fnmatch
+import re
+from pathlib import Path
+
+
+REPO_ROOT = Path(__file__).resolve().parent.parent.parent
+EXPECTED = """
+# sphinx_gallery_start_ignore
+from tvm import testing
+
+testing.utils.install_request_hook(depth=3)
+# sphinx_gallery_end_ignore
+""".rstrip()
+IGNORE_PATTERNS = ["*/micro_tvmc.py", "*/micro_train.py"]
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="Check that all tutorials/docs override 
urllib.request.Request"
+    )
+    parser.add_argument(
+        "--fix", action="store_true", help="Insert expected code into erroring 
files"
+    )
+    args = parser.parse_args()
+
+    gallery_files = (REPO_ROOT / "gallery").glob("**/*.py")
+
+    errors = []
+    for file in gallery_files:
+        skip = False
+        for ignored_file in IGNORE_PATTERNS:
+            if fnmatch.fnmatch(str(file), ignored_file):
+                skip = True
+                break
+        if skip:
+            continue
+
+        with open(file) as f:
+            content = f.read()
+
+        if EXPECTED not in content:
+            errors.append(file)
+
+    if args.fix:
+        for error in errors:
+            with open(error) as f:
+                content = f.read()
+
+            if "from __future__" in content:
+                # Place after the last __future__ import
+                new_content = re.sub(
+                    r"((?:from __future__.*?\n)+)", r"\1\n" + EXPECTED + "\n", content, flags=re.MULTILINE
+                )
+            else:
+                # Place after the module doc comment
+                new_content = re.sub(
+                    r"(\"\"\"(?:.*\n)+?\"\"\")", r"\1\n" + EXPECTED + "\n", content, flags=re.MULTILINE
+                )
+
+            with open(error, "w") as f:
+                f.write(new_content)
+    else:
+        # Don't fix, just check and print an error message
+        if len(errors) > 0:
+            print(
+                f"These {len(errors)} files did not contain the expected text 
to "
+                "override urllib.request.Request.\n"
+                "You can run 'python3 tests/lint/check_request_hook.py --fix' 
to "
+                "automatically fix these errors:\n"
+                f"{EXPECTED}\n\nFiles:\n" + "\n".join([str(error_path) for 
error_path in errors])
+            )
+            exit(1)
+        else:
+            print("All files successfully override urllib.request.Request")
+            exit(0)
diff --git a/tests/scripts/request_hook/request_hook.py 
b/tests/scripts/request_hook/request_hook.py
new file mode 100644
index 0000000000..f24f76869e
--- /dev/null
+++ b/tests/scripts/request_hook/request_hook.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import urllib.request
+import logging
+
+LOGGER = None
+
+
+# To update this list, run the workflow <HERE> with the URL to download and 
the SHA512 of the file
+BASE = "https://tvm-ci-resources.s3.us-west-2.amazonaws.com"
+URL_MAP = {
+    
"https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResNet/resnet18.zip":
 f"{BASE}/oneflow/resnet18.zip",
+    "https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_cora.torch": 
f"{BASE}/gcn_cora.torch",
+    "https://homes.cs.washington.edu/~moreau/media/vta/cat.jpg": 
f"{BASE}/vta_cat.jpg",
+    "https://people.linaro.org/~tom.gall/sine_model.tflite": 
f"{BASE}/sine_model.tflite",
+    "https://pjreddie.com/media/files/yolov3-tiny.weights?raw=true": 
f"{BASE}/yolov3-tiny.weights",
+    "https://pjreddie.com/media/files/yolov3.weights": 
f"{BASE}/yolov3.weights",
+    
"http://data.mxnet.io.s3-website-us-west-1.amazonaws.com/data/val_256_q90.rec": 
f"{BASE}/mxnet-val_256_q90.rec",
+    
"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz":
 f"{BASE}/tf-mobilenet_v1_1.0_224.tgz",
+    "http://images.cocodataset.org/zips/val2017.zip": 
f"{BASE}/cocodataset-val2017.zip",
+    "https://bj.bcebos.com/x2paddle/models/paddle_resnet50.tar": 
f"{BASE}/bcebos-paddle_resnet50.tar",
+    "https://data.deepai.org/stanfordcars.zip": 
f"{BASE}/deepai-stanfordcars.zip",
+}
+
+
+class TvmRequestHook(urllib.request.Request):
+    def __init__(self, url, *args, **kwargs):
+        LOGGER.info(f"Caught access to {url}")
+        if url in URL_MAP:
+            new_url = URL_MAP[url]
+            LOGGER.info(f"Mapped URL {url} to {new_url}")
+        else:
+            new_url = url
+        super().__init__(new_url, *args, **kwargs)
+
+
+def init():
+    global LOGGER
+    urllib.request.Request = TvmRequestHook
+    LOGGER = logging.getLogger("tvm_request_hook")
+    LOGGER.setLevel(logging.DEBUG)
+    fh = logging.FileHandler("redirected_urls.log")
+    fh.setLevel(logging.DEBUG)
+    LOGGER.addHandler(fh)
diff --git a/tests/scripts/task_lint.sh b/tests/scripts/task_lint.sh
index a05f7ca36b..84f4652337 100755
--- a/tests/scripts/task_lint.sh
+++ b/tests/scripts/task_lint.sh
@@ -40,6 +40,9 @@ function shard1 {
   echo "Checking CMake <-> LibInfo options mirroring"
   python3 tests/lint/check_cmake_options.py
 
+  echo "Checking that all sphinx-gallery docs override urllib.request.Request"
+  python3 tests/lint/check_request_hook.py
+
   echo "black check..."
   tests/lint/git-black.sh
 

Reply via email to